[NETFILTER]: nf_nat: merge nf_conn and nf_nat_info
net/ipv4/netfilter/nf_nat_core.c
/* NAT for netfilter; shared with compatibility layer. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/tcp.h>  /* For tcp_prot in getorigdst */
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/jhash.h>

#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>

#if 0
#define DEBUGP printk
#else
#define DEBUGP(format, args...)
#endif

static DEFINE_RWLOCK(nf_nat_lock);

static struct nf_conntrack_l3proto *l3proto = NULL;

/* Calculated at init based on memory size */
static unsigned int nf_nat_htable_size;

static struct list_head *bysource;

#define MAX_IP_NAT_PROTO 256
static struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO];

static inline struct nf_nat_protocol *
__nf_nat_proto_find(u_int8_t protonum)
{
	return rcu_dereference(nf_nat_protos[protonum]);
}

struct nf_nat_protocol *
nf_nat_proto_find_get(u_int8_t protonum)
{
	struct nf_nat_protocol *p;

	rcu_read_lock();
	p = __nf_nat_proto_find(protonum);
	if (!try_module_get(p->me))
		p = &nf_nat_unknown_protocol;
	rcu_read_unlock();

	return p;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_find_get);

void
nf_nat_proto_put(struct nf_nat_protocol *p)
{
	module_put(p->me);
}
EXPORT_SYMBOL_GPL(nf_nat_proto_put);
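
/* Illustrative sketch, not built: a hedged example of how a caller
 * outside packet context might pair the two helpers above to hold a
 * module reference while using a protocol.  The wrapper function name
 * is hypothetical; only nf_nat_proto_find_get()/nf_nat_proto_put()
 * come from this file. */
#if 0
static void example_proto_ref(void)
{
	struct nf_nat_protocol *p;

	p = nf_nat_proto_find_get(IPPROTO_TCP);
	/* ... use p->in_range()/p->unique_tuple() while holding the ref ... */
	nf_nat_proto_put(p);
}
#endif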
80
81/* We keep an extra hash for each conntrack, for fast searching. */
82static inline unsigned int
83hash_by_src(const struct nf_conntrack_tuple *tuple)
84{
85 /* Original src, to ensure we map it consistently if poss. */
86 return jhash_3words((__force u32)tuple->src.u3.ip, tuple->src.u.all,
87 tuple->dst.protonum, 0) % nf_nat_htable_size;
88}
89
5b1158e9
JK
90/* Is this tuple already taken? (not by us) */
91int
92nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
93 const struct nf_conn *ignored_conntrack)
94{
95 /* Conntrack tracking doesn't keep track of outgoing tuples; only
96 incoming ones. NAT means they don't have a fixed mapping,
97 so we invert the tuple and look for the incoming reply.
98
99 We could keep a separate hash if this proves too slow. */
100 struct nf_conntrack_tuple reply;
101
102 nf_ct_invert_tuplepr(&reply, tuple);
103 return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
104}
105EXPORT_SYMBOL(nf_nat_used_tuple);
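
/* Illustrative sketch, not built: roughly how a protocol's
 * unique_tuple() callback probes for a free source port with
 * nf_nat_used_tuple().  The function name and parameters are
 * hypothetical; the real per-protocol code lives in the nf_nat_proto_*
 * modules and also honours range flags and offsets. */
#if 0
static int example_find_free_port(struct nf_conntrack_tuple *tuple,
				  const struct nf_conn *ct,
				  u_int16_t min, u_int16_t max)
{
	unsigned int port;

	for (port = min; port <= max; port++) {
		tuple->src.u.all = htons((u_int16_t)port);
		if (!nf_nat_used_tuple(tuple, ct))
			return 1;	/* tuple is now unique */
	}
	return 0;
}
#endif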

/* If we source map this tuple so the reply looks like reply_tuple,
 * will that meet the constraints of the range? */
static int
in_range(const struct nf_conntrack_tuple *tuple,
	 const struct nf_nat_range *range)
{
	struct nf_nat_protocol *proto;
	int ret = 0;

	/* If we are supposed to map IPs, then we must be in the
	   range specified, otherwise let this drag us onto a new src IP. */
	if (range->flags & IP_NAT_RANGE_MAP_IPS) {
		if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
		    ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
			return 0;
	}

	rcu_read_lock();
	proto = __nf_nat_proto_find(tuple->dst.protonum);
	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	    proto->in_range(tuple, IP_NAT_MANIP_SRC,
			    &range->min, &range->max))
		ret = 1;
	rcu_read_unlock();

	return ret;
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		t->src.u3.ip == tuple->src.u3.ip &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	unsigned int h = hash_by_src(tuple);
	struct nf_conn_nat *nat;
	struct nf_conn *ct;

	read_lock_bh(&nf_nat_lock);
	list_for_each_entry(nat, &bysource[h], bysource) {
		ct = nat->ct;
		if (same_src(ct, tuple)) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(result, range)) {
				read_unlock_bh(&nf_nat_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&nf_nat_lock);
	return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
   src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
   if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
   1-65535, we don't do pro-rata allocation based on ports; we choose
   the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
find_best_ips_proto(struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	__be32 *var_ipp;
	/* Host order */
	u_int32_t minip, maxip, j;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == IP_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3.ip;
	else
		var_ipp = &tuple->dst.u3.ip;

	/* Fast path: only one choice. */
	if (range->min_ip == range->max_ip) {
		*var_ipp = range->min_ip;
		return;
	}

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots. */
	minip = ntohl(range->min_ip);
	maxip = ntohl(range->max_ip);
	j = jhash_2words((__force u32)tuple->src.u3.ip,
			 (__force u32)tuple->dst.u3.ip, 0);
	*var_ipp = htonl(minip + j % (maxip - minip + 1));
}
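
/* Worked example (hedged, values hypothetical): with min_ip 10.0.0.1
 * and max_ip 10.0.0.4 there are maxip - minip + 1 == 4 candidates, so
 * the chosen address is minip + jhash_2words(src, dst, 0) % 4.  Since
 * the hash depends only on the src/dst pair, the same client talking
 * to the same server always maps to the same source IP. */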

/* Manipulate the tuple into the range given.  For NF_IP_POST_ROUTING,
 * we change the source to map into the range.  For NF_IP_PRE_ROUTING
 * and NF_IP_LOCAL_OUT, we change the destination to map into the
 * range.  It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	struct nf_nat_protocol *proto;

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	   and that same mapping gives a unique tuple within the given
	   range, use that.

	   This is only required for source (ie. NAT/masq) mappings.
	   So far, we don't do local source mappings, so multiple
	   manips are not an issue. */
	if (maniptype == IP_NAT_MANIP_SRC) {
		if (find_appropriate_src(orig_tuple, tuple, range)) {
			DEBUGP("get_unique_tuple: Found current src map\n");
			if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
				if (!nf_nat_used_tuple(tuple, ct))
					return;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given
	   range. */
	*tuple = *orig_tuple;
	find_best_ips_proto(tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	   the range to make a unique tuple. */

	rcu_read_lock();
	proto = __nf_nat_proto_find(orig_tuple->dst.protonum);

	/* Change protocol info to have some randomization */
	if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
		proto->unique_tuple(tuple, range, maniptype, ct);
		goto out;
	}

	/* Only bother mapping if it's not already in range and unique */
	if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	     proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
	    !nf_nat_used_tuple(tuple, ct))
		goto out;

	/* Last chance: get protocol to try to obtain unique tuple. */
	proto->unique_tuple(tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  unsigned int hooknum)
{
	struct nf_conntrack_tuple curr_tuple, new_tuple;
	struct nf_conn_nat *nat;
	int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);
	enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);

	/* nat helper or nfctnetlink also set up the binding */
	nat = nfct_nat(ct);
	if (!nat) {
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
		if (nat == NULL) {
			DEBUGP("failed to add NAT extension\n");
			return NF_ACCEPT;
		}
	}

	NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING ||
		     hooknum == NF_IP_POST_ROUTING ||
		     hooknum == NF_IP_LOCAL_IN ||
		     hooknum == NF_IP_LOCAL_OUT);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like inverse of reply.  Normally
	   this is what is in the conntrack, except for prior
	   manipulations (future optimization: if num_manips == 0,
	   orig_tp =
	   conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so it will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == IP_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;
	}

	/* Place in source hash if this is the first time. */
	if (have_to_hash) {
		unsigned int srchash;

		srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		write_lock_bh(&nf_nat_lock);
		/* nf_conntrack_alter_reply might re-allocate the extension area */
		nat = nfct_nat(ct);
		nat->ct = ct;
		list_add(&nat->bysource, &bysource[srchash]);
		write_unlock_bh(&nf_nat_lock);
	}

	/* It's done. */
	if (maniptype == IP_NAT_MANIP_DST)
		set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
	else
		set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
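
/* Illustrative sketch, not built: a hedged example of how a NAT
 * target of this era calls nf_nat_setup_info() from POSTROUTING to
 * bind a connection to a single source address.  The function name
 * and the address are hypothetical; only nf_nat_setup_info() and the
 * nf_nat_range fields come from this code base. */
#if 0
static unsigned int example_snat_target(struct nf_conn *ct,
					unsigned int hooknum)
{
	struct nf_nat_range range = {
		.flags	= IP_NAT_RANGE_MAP_IPS,
		.min_ip	= htonl(0xc0a80001),	/* 192.168.0.1 */
		.max_ip	= htonl(0xc0a80001),	/* single-address range */
	};

	NF_CT_ASSERT(hooknum == NF_IP_POST_ROUTING);
	return nf_nat_setup_info(ct, &range, hooknum);
}
#endif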

/* Returns true if succeeded. */
static int
manip_pkt(u_int16_t proto,
	  struct sk_buff **pskb,
	  unsigned int iphdroff,
	  const struct nf_conntrack_tuple *target,
	  enum nf_nat_manip_type maniptype)
{
	struct iphdr *iph;
	struct nf_nat_protocol *p;

	if (!skb_make_writable(pskb, iphdroff + sizeof(*iph)))
		return 0;

	iph = (void *)(*pskb)->data + iphdroff;

	/* Manipulate protocol part. */

	/* rcu_read_lock()ed by nf_hook_slow */
	p = __nf_nat_proto_find(proto);
	if (!p->manip_pkt(pskb, iphdroff, target, maniptype))
		return 0;

	iph = (void *)(*pskb)->data + iphdroff;

	if (maniptype == IP_NAT_MANIP_SRC) {
		nf_csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
		iph->saddr = target->src.u3.ip;
	} else {
		nf_csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
		iph->daddr = target->dst.u3.ip;
	}
	return 1;
}

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff **pskb)
{
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		if (!manip_pkt(target.dst.protonum, pskb, 0, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
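
/* Illustrative sketch, not built: the per-hook NAT code (see
 * nf_nat_standalone.c) ends up invoking nf_nat_packet() roughly like
 * this once a mapping exists.  The wrapper is hypothetical and omits
 * the helper and setup steps the real hook performs first. */
#if 0
static unsigned int example_nat_hook(unsigned int hooknum,
				     struct sk_buff **pskb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(*pskb, &ctinfo);

	if (!ct)
		return NF_ACCEPT;	/* untracked packets pass through */
	return nf_nat_packet(ct, ctinfo, hooknum, pskb);
}
#endif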

/* Dir is direction ICMP is coming from (opposite to packet it contains) */
int nf_nat_icmp_reply_translation(struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned int hooknum,
				  struct sk_buff **pskb)
{
	struct {
		struct icmphdr icmp;
		struct iphdr ip;
	} *inside;
	struct nf_conntrack_l4proto *l4proto;
	struct nf_conntrack_tuple inner, target;
	int hdrlen = ip_hdrlen(*pskb);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);

	if (!skb_make_writable(pskb, hdrlen + sizeof(*inside)))
		return 0;

	inside = (void *)(*pskb)->data + ip_hdrlen(*pskb);

	/* We're actually going to mangle it beyond trivial checksum
	   adjustment, so make sure the current checksum is correct. */
	if (nf_ip_checksum(*pskb, hooknum, hdrlen, 0))
		return 0;

	/* Must be RELATED */
	NF_CT_ASSERT((*pskb)->nfctinfo == IP_CT_RELATED ||
		     (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);

	/* Redirects on non-null nats must be dropped, else they'll
	   start talking to each other without our translation, and be
	   confused... --RR */
	if (inside->icmp.type == ICMP_REDIRECT) {
		/* If NAT isn't finished, assume it and drop. */
		if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
			return 0;

		if (ct->status & IPS_NAT_MASK)
			return 0;
	}

	DEBUGP("icmp_reply_translation: translating error %p manip %u dir %s\n",
	       *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");

	/* rcu_read_lock()ed by nf_hook_slow */
	l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);

	if (!nf_ct_get_tuple(*pskb,
			     ip_hdrlen(*pskb) + sizeof(struct icmphdr),
			     (ip_hdrlen(*pskb) +
			      sizeof(struct icmphdr) + inside->ip.ihl * 4),
			     (u_int16_t)AF_INET,
			     inside->ip.protocol,
			     &inner, l3proto, l4proto))
		return 0;

	/* Change inner back to look like incoming packet.  We do the
	   opposite manip on this hook to normal, because it might not
	   pass all hooks (locally-generated ICMP).  Consider incoming
	   packet: PREROUTING (DST manip), routing produces ICMP, goes
	   through POSTROUTING (which must correct the DST manip). */
	if (!manip_pkt(inside->ip.protocol, pskb,
		       ip_hdrlen(*pskb) + sizeof(inside->icmp),
		       &ct->tuplehash[!dir].tuple,
		       !manip))
		return 0;

	if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
		/* Reloading "inside" here since manip_pkt may have
		   moved the skb data. */
		inside = (void *)(*pskb)->data + ip_hdrlen(*pskb);
		inside->icmp.checksum = 0;
		inside->icmp.checksum =
			csum_fold(skb_checksum(*pskb, hdrlen,
					       (*pskb)->len - hdrlen, 0));
	}

	/* Change outer to look like the reply to an incoming packet
	 * (proto 0 means don't invert per-proto part). */
	if (manip == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
		if (!manip_pkt(0, pskb, 0, &target, manip))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);

/* Protocol registration. */
int nf_nat_protocol_register(struct nf_nat_protocol *proto)
{
	int ret = 0;

	write_lock_bh(&nf_nat_lock);
	if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
		ret = -EBUSY;
		goto out;
	}
	rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
 out:
	write_unlock_bh(&nf_nat_lock);
	return ret;
}
EXPORT_SYMBOL(nf_nat_protocol_register);

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_protocol_unregister(struct nf_nat_protocol *proto)
{
	write_lock_bh(&nf_nat_lock);
	rcu_assign_pointer(nf_nat_protos[proto->protonum],
			   &nf_nat_unknown_protocol);
	write_unlock_bh(&nf_nat_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);
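
/* Illustrative sketch, not built: a hedged example of a hypothetical
 * protocol module registering its nf_nat_protocol at init time and
 * unregistering on exit.  The structure, the callbacks and protocol
 * number 253 (reserved for experimentation) are placeholders; only
 * nf_nat_protocol_register()/nf_nat_protocol_unregister() come from
 * this file. */
#if 0
static struct nf_nat_protocol nf_nat_protocol_example = {
	.name		= "example",
	.protonum	= 253,
	.me		= THIS_MODULE,
	.manip_pkt	= example_manip_pkt,	/* hypothetical callbacks */
	.in_range	= example_in_range,
	.unique_tuple	= example_unique_tuple,
};

static int __init example_init(void)
{
	return nf_nat_protocol_register(&nf_nat_protocol_example);
}

static void __exit example_exit(void)
{
	nf_nat_protocol_unregister(&nf_nat_protocol_example);
}
#endif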

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
int
nf_nat_port_range_to_nfattr(struct sk_buff *skb,
			    const struct nf_nat_range *range)
{
	NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16),
		&range->min.tcp.port);
	NFA_PUT(skb, CTA_PROTONAT_PORT_MAX, sizeof(__be16),
		&range->max.tcp.port);

	return 0;

nfattr_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nfattr);

int
nf_nat_port_nfattr_to_range(struct nfattr *tb[], struct nf_nat_range *range)
{
	int ret = 0;

	/* we have to return whether we actually parsed something or not */

	if (tb[CTA_PROTONAT_PORT_MIN-1]) {
		ret = 1;
		range->min.tcp.port =
			*(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]);
	}

	if (!tb[CTA_PROTONAT_PORT_MAX-1]) {
		if (ret)
			range->max.tcp.port = range->min.tcp.port;
	} else {
		ret = 1;
		range->max.tcp.port =
			*(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_port_nfattr_to_range);
#endif

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);

	if (nat == NULL || nat->ct == NULL)
		return;

	NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK);

	write_lock_bh(&nf_nat_lock);
	list_del(&nat->bysource);
	nat->ct = NULL;
	write_unlock_bh(&nf_nat_lock);
}

static void nf_nat_move_storage(struct nf_conn *conntrack, void *old)
{
	struct nf_conn_nat *new_nat = nf_ct_ext_find(conntrack, NF_CT_EXT_NAT);
	struct nf_conn_nat *old_nat = (struct nf_conn_nat *)old;
	struct nf_conn *ct = old_nat->ct;
	unsigned int srchash;

	if (!(ct->status & IPS_NAT_DONE_MASK))
		return;

	srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);

	write_lock_bh(&nf_nat_lock);
	list_replace(&old_nat->bysource, &new_nat->bysource);
	new_nat->ct = ct;
	write_unlock_bh(&nf_nat_lock);
}

static struct nf_ct_ext_type nat_extend = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.move		= nf_nat_move_storage,
	.id		= NF_CT_EXT_NAT,
	.flags		= NF_CT_EXT_F_PREALLOC,
};

static int __init nf_nat_init(void)
{
	size_t i;
	int ret;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
		return ret;
	}

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;

	/* One vmalloc for the bysource hash table */
	bysource = vmalloc(sizeof(struct list_head) * nf_nat_htable_size);
	if (!bysource) {
		ret = -ENOMEM;
		goto cleanup_extend;
	}

	/* Sew in builtin protocols. */
	write_lock_bh(&nf_nat_lock);
	for (i = 0; i < MAX_IP_NAT_PROTO; i++)
		rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
	write_unlock_bh(&nf_nat_lock);

	for (i = 0; i < nf_nat_htable_size; i++)
		INIT_LIST_HEAD(&bysource[i]);

	/* Initialize fake conntrack so that NAT will skip it */
	nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;

	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
	return 0;

 cleanup_extend:
	nf_ct_extend_unregister(&nat_extend);
	return ret;
}

/* Clear NAT section of all conntracks, in case we're loaded again. */
static int clean_nat(struct nf_conn *i, void *data)
{
	struct nf_conn_nat *nat = nfct_nat(i);

	if (!nat)
		return 0;
	memset(nat, 0, sizeof(*nat));
	i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
	return 0;
}

static void __exit nf_nat_cleanup(void)
{
	nf_ct_iterate_cleanup(&clean_nat, NULL);
	synchronize_rcu();
	vfree(bysource);
	nf_ct_l3proto_put(l3proto);
	nf_ct_extend_unregister(&nat_extend);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);