/* NAT for netfilter; shared with compatibility layer. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/tcp.h>  /* For tcp_prot in getorigdst */
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/jhash.h>

#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>

#if 0
#define DEBUGP printk
#else
#define DEBUGP(format, args...)
#endif
static DEFINE_RWLOCK(nf_nat_lock);

static struct nf_conntrack_l3proto *l3proto = NULL;

/* Calculated at init based on memory size */
static unsigned int nf_nat_htable_size;

static struct list_head *bysource;

#define MAX_IP_NAT_PROTO 256
static struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO];
static inline struct nf_nat_protocol *
__nf_nat_proto_find(u_int8_t protonum)
{
	return nf_nat_protos[protonum];
}
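/* Dispatch note (illustrative, not in the original source): the table
 * is indexed directly by IP protocol number, so lookup is O(1).  After
 * nf_nat_init() below has seeded the table:
 *
 *	__nf_nat_proto_find(IPPROTO_TCP)   -> &nf_nat_protocol_tcp
 *	__nf_nat_proto_find(IPPROTO_SCTP)  -> &nf_nat_unknown_protocol
 *					      (until a handler registers)
 */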
struct nf_nat_protocol *
nf_nat_proto_find_get(u_int8_t protonum)
{
	struct nf_nat_protocol *p;

	/* we need to disable preemption to make sure 'p' doesn't get
	 * removed until we've grabbed the reference */
	preempt_disable();
	p = __nf_nat_proto_find(protonum);
	if (!try_module_get(p->me))
		p = &nf_nat_unknown_protocol;
	preempt_enable();

	return p;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_find_get);
void
nf_nat_proto_put(struct nf_nat_protocol *p)
{
	module_put(p->me);
}
EXPORT_SYMBOL_GPL(nf_nat_proto_put);
/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct nf_conntrack_tuple *tuple)
{
	/* Original src, to ensure we map it consistently if poss. */
	return jhash_3words((__force u32)tuple->src.u3.ip, tuple->src.u.all,
			    tuple->dst.protonum, 0) % nf_nat_htable_size;
}
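/* Note (illustrative, not in the original source): the hash covers
 * only the ORIGINAL direction's source identity -- address, port
 * (src.u.all) and protocol -- so every connection from the same
 * internal endpoint lands in the same bysource[] chain.  That is what
 * lets find_appropriate_src() below reuse an existing mapping for new
 * connections from an already-NATed host.
 */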
/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *conn)
{
	struct nf_conn_nat *nat;

	if (!(conn->status & IPS_NAT_DONE_MASK))
		return;

	nat = nfct_nat(conn);
	write_lock_bh(&nf_nat_lock);
	list_del(&nat->info.bysource);
	write_unlock_bh(&nf_nat_lock);
}
/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	   incoming ones.  NAT means they don't have a fixed mapping,
	   so we invert the tuple and look for the incoming reply.

	   We could keep a separate hash if this proves too slow. */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);
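/* Worked example (illustrative, not in the original source): suppose
 * we are testing the candidate outgoing tuple
 *
 *	src 192.0.2.10:40000 -> dst 198.51.100.7:80 (TCP)
 *
 * Inverting it gives the reply tuple the conntrack table would hold:
 *
 *	src 198.51.100.7:80 -> dst 192.0.2.10:40000 (TCP)
 *
 * If nf_conntrack_tuple_taken() finds that reply tuple owned by some
 * other conntrack, the candidate mapping would collide and must not
 * be used.
 */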
/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range. */
static int
in_range(const struct nf_conntrack_tuple *tuple,
	 const struct nf_nat_range *range)
{
	struct nf_nat_protocol *proto;

	proto = __nf_nat_proto_find(tuple->dst.protonum);
	/* If we are supposed to map IPs, then we must be in the
	   range specified, otherwise let this drag us onto a new src IP. */
	if (range->flags & IP_NAT_RANGE_MAP_IPS) {
		if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
		    ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
			return 0;
	}

	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	    proto->in_range(tuple, IP_NAT_MANIP_SRC,
			    &range->min, &range->max))
		return 1;

	return 0;
}
static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		t->src.u3.ip == tuple->src.u3.ip &&
		t->src.u.all == tuple->src.u.all);
}
/* Only called for SRC manip */
static int
find_appropriate_src(const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	unsigned int h = hash_by_src(tuple);
	struct nf_conn_nat *nat;
	struct nf_conn *ct;

	read_lock_bh(&nf_nat_lock);
	list_for_each_entry(nat, &bysource[h], info.bysource) {
		/* Recover the owning conntrack: the nf_conn_nat lives in
		   the conntrack's data area, so step back by its offset. */
		ct = (struct nf_conn *)((char *)nat -
					offsetof(struct nf_conn, data));
		if (same_src(ct, tuple)) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(result, range)) {
				read_unlock_bh(&nf_nat_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&nf_nat_lock);
	return 0;
}
/* For [FUTURE] fragmentation handling, we want the least-used
   src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
   if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
   1-65535, we don't do pro-rata allocation based on ports; we choose
   the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
find_best_ips_proto(struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	__be32 *var_ipp;
	/* Host order */
	u_int32_t minip, maxip, j;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == IP_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3.ip;
	else
		var_ipp = &tuple->dst.u3.ip;

	/* Fast path: only one choice. */
	if (range->min_ip == range->max_ip) {
		*var_ipp = range->min_ip;
		return;
	}

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots. */
	minip = ntohl(range->min_ip);
	maxip = ntohl(range->max_ip);
	j = jhash_2words((__force u32)tuple->src.u3.ip,
			 (__force u32)tuple->dst.u3.ip, 0);
	*var_ipp = htonl(minip + j % (maxip - minip + 1));
}
/* Manipulate the tuple into the range given.  For NF_IP_POST_ROUTING,
 * we change the source to map into the range.  For NF_IP_PRE_ROUTING
 * and NF_IP_LOCAL_OUT, we change the destination to map into the
 * range.  It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	struct nf_nat_protocol *proto;

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	   and that same mapping gives a unique tuple within the given
	   range, use that.

	   This is only required for source (ie. NAT/masq) mappings.
	   So far, we don't do local source mappings, so multiple
	   manips not an issue.  */
	if (maniptype == IP_NAT_MANIP_SRC) {
		if (find_appropriate_src(orig_tuple, tuple, range)) {
			DEBUGP("get_unique_tuple: Found current src map\n");
			if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
				if (!nf_nat_used_tuple(tuple, ct))
					return;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given
	   range. */
	*tuple = *orig_tuple;
	find_best_ips_proto(tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	   the range to make a unique tuple. */
	proto = nf_nat_proto_find_get(orig_tuple->dst.protonum);

	/* Change protocol info to have some randomization */
	if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
		proto->unique_tuple(tuple, range, maniptype, ct);
		nf_nat_proto_put(proto);
		return;
	}

	/* Only bother mapping if it's not already in range and unique */
	if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	     proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
	    !nf_nat_used_tuple(tuple, ct)) {
		nf_nat_proto_put(proto);
		return;
	}

	/* Last chance: get protocol to try to obtain unique tuple. */
	proto->unique_tuple(tuple, range, maniptype, ct);

	nf_nat_proto_put(proto);
}
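/* Summary sketch (illustrative, not in the original source): for a
 * typical masquerade of an outgoing TCP connection, step 1 asks "is
 * this internal source already mapped?" and reuses that external
 * address if the resulting tuple is still free; step 2 otherwise picks
 * an external IP from the range via find_best_ips_proto(); step 3 lets
 * the TCP protocol handler choose a source port within the configured
 * (or default) port range until nf_nat_used_tuple() reports the tuple
 * unique.
 */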
unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  unsigned int hooknum)
{
	struct nf_conntrack_tuple curr_tuple, new_tuple;
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_info *info = &nat->info;
	int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);
	enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);

	NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING ||
		     hooknum == NF_IP_POST_ROUTING ||
		     hooknum == NF_IP_LOCAL_IN ||
		     hooknum == NF_IP_LOCAL_OUT);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like inverse of reply.  Normally
	   this is what is in the conntrack, except for prior
	   manipulations (future optimization: if num_manips == 0,
	   orig_tp =
	   conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == IP_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;
	}

	/* Place in source hash if this is the first time. */
	if (have_to_hash) {
		unsigned int srchash;

		srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		write_lock_bh(&nf_nat_lock);
		list_add(&info->bysource, &bysource[srchash]);
		write_unlock_bh(&nf_nat_lock);
	}

	/* It's done. */
	if (maniptype == IP_NAT_MANIP_DST)
		set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
	else
		set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
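/* Usage sketch (illustrative, not in the original source): a NAT
 * target typically calls nf_nat_setup_info() once per connection from
 * its hook, e.g. an SNAT-style rule at POSTROUTING mapping to a single
 * external address:
 *
 *	struct nf_nat_range range = {
 *		.flags  = IP_NAT_RANGE_MAP_IPS,
 *		.min_ip = htonl(0xcb007101),	(203.0.113.1)
 *		.max_ip = htonl(0xcb007101),
 *	};
 *	return nf_nat_setup_info(ct, &range, NF_IP_POST_ROUTING);
 *
 * This function only decides and records the mapping; the packets of
 * the connection are then rewritten by nf_nat_packet() below.
 */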
/* Returns true if succeeded. */
static int
manip_pkt(u_int16_t proto,
	  struct sk_buff **pskb,
	  unsigned int iphdroff,
	  const struct nf_conntrack_tuple *target,
	  enum nf_nat_manip_type maniptype)
{
	struct iphdr *iph;
	struct nf_nat_protocol *p;

	if (!skb_make_writable(pskb, iphdroff + sizeof(*iph)))
		return 0;

	iph = (void *)(*pskb)->data + iphdroff;

	/* Manipulate protocol part. */
	p = nf_nat_proto_find_get(proto);
	if (!p->manip_pkt(pskb, iphdroff, target, maniptype)) {
		nf_nat_proto_put(p);
		return 0;
	}
	nf_nat_proto_put(p);

	/* The data pointer may have moved (e.g. copy-on-write); reload. */
	iph = (void *)(*pskb)->data + iphdroff;

	if (maniptype == IP_NAT_MANIP_SRC) {
		nf_csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
		iph->saddr = target->src.u3.ip;
	} else {
		nf_csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
		iph->daddr = target->dst.u3.ip;
	}
	return 1;
}
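/* Checksum note (illustrative, not in the original source):
 * nf_csum_replace4() updates the 16-bit ones-complement IP header
 * checksum incrementally (RFC 1624 style) -- fold out the old 32-bit
 * word, fold in the new one -- instead of recomputing over the whole
 * header, so rewriting saddr/daddr stays O(1) per packet.
 */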
/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff **pskb)
{
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		if (!manip_pkt(target.dst.protonum, pskb, 0, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
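/* Direction table (illustrative, not in the original source):
 * statusbit ^= IPS_NAT_MASK implements the following mapping, since
 * IPS_NAT_MASK == (IPS_SRC_NAT | IPS_DST_NAT):
 *
 *	manip type	packet direction	bit tested
 *	SRC		ORIGINAL		IPS_SRC_NAT
 *	SRC		REPLY			IPS_DST_NAT
 *	DST		ORIGINAL		IPS_DST_NAT
 *	DST		REPLY			IPS_SRC_NAT
 *
 * i.e. a reply packet gets the inverse manipulation of the one set up
 * for the original direction.
 */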
/* Dir is direction ICMP is coming from (opposite to packet it contains) */
int nf_nat_icmp_reply_translation(struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned int hooknum,
				  struct sk_buff **pskb)
{
	struct {
		struct icmphdr icmp;
		struct iphdr ip;
	} *inside;
	struct nf_conntrack_tuple inner, target;
	int hdrlen = (*pskb)->nh.iph->ihl * 4;
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);

	if (!skb_make_writable(pskb, hdrlen + sizeof(*inside)))
		return 0;

	inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;

	/* We're actually going to mangle it beyond trivial checksum
	   adjustment, so make sure the current checksum is correct. */
	if (nf_ip_checksum(*pskb, hooknum, hdrlen, 0))
		return 0;

	/* Must be RELATED */
	NF_CT_ASSERT((*pskb)->nfctinfo == IP_CT_RELATED ||
		     (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);

	/* Redirects on non-null nats must be dropped, else they'll
	   start talking to each other without our translation, and be
	   confused ... --RR */
	if (inside->icmp.type == ICMP_REDIRECT) {
		/* If NAT isn't finished, assume it and drop. */
		if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
			return 0;

		if (ct->status & IPS_NAT_MASK)
			return 0;
	}

	DEBUGP("icmp_reply_translation: translating error %p manip %u dir %s\n",
	       *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");

	if (!nf_ct_get_tuple(*pskb,
			     (*pskb)->nh.iph->ihl*4 + sizeof(struct icmphdr),
			     (*pskb)->nh.iph->ihl*4 +
			     sizeof(struct icmphdr) + inside->ip.ihl*4,
			     (u_int16_t)AF_INET,
			     inside->ip.protocol,
			     &inner, l3proto,
			     __nf_ct_l4proto_find((u_int16_t)PF_INET,
						  inside->ip.protocol)))
		return 0;

	/* Change inner back to look like incoming packet.  We do the
	   opposite manip on this hook to normal, because it might not
	   pass all hooks (locally-generated ICMP).  Consider incoming
	   packet: PREROUTING (DST manip), routing produces ICMP, goes
	   through POSTROUTING (which must correct the DST manip). */
	if (!manip_pkt(inside->ip.protocol, pskb,
		       (*pskb)->nh.iph->ihl*4 + sizeof(inside->icmp),
		       &ct->tuplehash[!dir].tuple,
		       !manip))
		return 0;

	if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
		/* Reloading "inside" here since manip_pkt inner. */
		inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
		inside->icmp.checksum = 0;
		inside->icmp.checksum =
			csum_fold(skb_checksum(*pskb, hdrlen,
					       (*pskb)->len - hdrlen, 0));
	}

	/* Change outer to look like the reply to an incoming packet
	 * (proto 0 means don't invert per-proto part). */
	if (manip == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
		if (!manip_pkt(0, pskb, 0, &target, manip))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
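/* Worked example (illustrative, not in the original source): an
 * internal host 192.0.2.10 is SNATed to 203.0.113.2 and a router on
 * the path returns ICMP "fragmentation needed".  The error arrives
 * addressed to 203.0.113.2 and embeds the ORIGINAL packet's header,
 * i.e. 203.0.113.2 -> remote.  Two rewrites are needed: the inner
 * header is put back to 192.0.2.10 -> remote (the opposite manip, so
 * the stack recognizes its own packet), and the outer destination
 * becomes 192.0.2.10 (the normal reply-direction manip).
 */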
/* Protocol registration. */
int nf_nat_protocol_register(struct nf_nat_protocol *proto)
{
	int ret = 0;

	write_lock_bh(&nf_nat_lock);
	if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
		ret = -EBUSY;
		goto out;
	}
	nf_nat_protos[proto->protonum] = proto;
 out:
	write_unlock_bh(&nf_nat_lock);
	return ret;
}
EXPORT_SYMBOL(nf_nat_protocol_register);
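/* Usage sketch (illustrative, not in the original source): a protocol
 * helper module registers its handler from module_init and must be
 * prepared for -EBUSY if the slot is already claimed:
 *
 *	static int __init nf_nat_proto_foo_init(void)
 *	{
 *		return nf_nat_protocol_register(&nf_nat_protocol_foo);
 *	}
 *
 * where nf_nat_protocol_foo is a hypothetical handler filling in
 * .protonum, .manip_pkt, .in_range and .unique_tuple.
 */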
/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_protocol_unregister(struct nf_nat_protocol *proto)
{
	write_lock_bh(&nf_nat_lock);
	nf_nat_protos[proto->protonum] = &nf_nat_unknown_protocol;
	write_unlock_bh(&nf_nat_lock);

	/* Someone could be still looking at the proto in a bh. */
	synchronize_net();
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);
#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
    defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
int
nf_nat_port_range_to_nfattr(struct sk_buff *skb,
			    const struct nf_nat_range *range)
{
	NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16),
		&range->min.tcp.port);
	NFA_PUT(skb, CTA_PROTONAT_PORT_MAX, sizeof(__be16),
		&range->max.tcp.port);

	return 0;

nfattr_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nfattr);

int
nf_nat_port_nfattr_to_range(struct nfattr *tb[], struct nf_nat_range *range)
{
	int ret = 0;

	/* we have to return whether we actually parsed something or not */
	if (tb[CTA_PROTONAT_PORT_MIN-1]) {
		ret = 1;
		range->min.tcp.port =
			*(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]);
	}

	if (!tb[CTA_PROTONAT_PORT_MAX-1]) {
		if (ret)
			range->max.tcp.port = range->min.tcp.port;
	} else {
		ret = 1;
		range->max.tcp.port =
			*(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_port_nfattr_to_range);
#endif
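/* Note (illustrative, not in the original source): nfattr types are
 * 1-based on the wire while the parsed tb[] array is 0-based, hence
 * the recurring "CTA_PROTONAT_PORT_MIN-1" indexing above.  Taking
 * max == min when only PORT_MIN is supplied encodes a single-port
 * range.
 */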
static int __init nf_nat_init(void)
{
	size_t i;

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;

	/* One vmalloc for the bysource hash table */
	bysource = vmalloc(sizeof(struct list_head) * nf_nat_htable_size);
	if (!bysource)
		return -ENOMEM;

	/* Sew in builtin protocols. */
	write_lock_bh(&nf_nat_lock);
	for (i = 0; i < MAX_IP_NAT_PROTO; i++)
		nf_nat_protos[i] = &nf_nat_unknown_protocol;
	nf_nat_protos[IPPROTO_TCP] = &nf_nat_protocol_tcp;
	nf_nat_protos[IPPROTO_UDP] = &nf_nat_protocol_udp;
	nf_nat_protos[IPPROTO_ICMP] = &nf_nat_protocol_icmp;
	write_unlock_bh(&nf_nat_lock);

	for (i = 0; i < nf_nat_htable_size; i++)
		INIT_LIST_HEAD(&bysource[i]);

	/* FIXME: Man, this is a hack.  <SIGH> */
	NF_CT_ASSERT(nf_conntrack_destroyed == NULL);
	nf_conntrack_destroyed = &nf_nat_cleanup_conntrack;

	/* Initialize fake conntrack so that NAT will skip it */
	nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;

	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
	return 0;
}
/* Clear NAT section of all conntracks, in case we're loaded again. */
static int clean_nat(struct nf_conn *i, void *data)
{
	struct nf_conn_nat *nat = nfct_nat(i);

	if (!nat)
		return 0;
	/* Wipe the entire per-conntrack NAT area. */
	memset(nat, 0, sizeof(*nat));
	i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
	return 0;
}
static void __exit nf_nat_cleanup(void)
{
	nf_ct_iterate_cleanup(&clean_nat, NULL);
	nf_conntrack_destroyed = NULL;
	vfree(bysource);
	nf_ct_l3proto_put(l3proto);
}
MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);