Merge tag 'v3.10.103' into update
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / ipv6 / netfilter / ip6_tables.c
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/capability.h>
14 #include <linux/in.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/poison.h>
21 #include <linux/icmpv6.h>
22 #include <net/ipv6.h>
23 #include <net/compat.h>
24 #include <asm/uaccess.h>
25 #include <linux/mutex.h>
26 #include <linux/proc_fs.h>
27 #include <linux/err.h>
28 #include <linux/cpumask.h>
29
30 #include <linux/netfilter_ipv6/ip6_tables.h>
31 #include <linux/netfilter/x_tables.h>
32 #include <net/netfilter/nf_log.h>
33 #include "../../netfilter/xt_repldata.h"
34
35 MODULE_LICENSE("GPL");
36 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
37 MODULE_DESCRIPTION("IPv6 packet filter");
38
39 /*#define DEBUG_IP_FIREWALL*/
40 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
41 /*#define DEBUG_IP_FIREWALL_USER*/
42
#ifdef DEBUG_IP_FIREWALL
/* Per-packet debug output; compiled out entirely unless enabled above. */
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
/* Debug output for the userspace configuration path (table replace etc.). */
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
/* Cheap invariant check: warns (does not crash) when the condition fails. */
#define IP_NF_ASSERT(x) WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
66
/*
 * Allocate and populate the initial replacement blob for a built-in table.
 * Thin wrapper around the xt_alloc_initial_table() helper macro, which
 * token-pastes the "ip6t"/"IP6T" prefixes to select the IPv6 entry types.
 */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
72
73 /*
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
79
80 Hence the start of any table is given by get_table() below. */
81
/* Returns whether the packet matches the rule's ip6t_ip6 portion or not. */
/* Performance critical - called for every packet */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* Evaluate @bool and invert the result when the rule's corresponding
 * IP6T_INV_* inversion flag is set. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Masked source/destination address comparison, each side honouring
	 * its own inversion flag. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Incoming interface name, masked compare (supports "eth+" style). */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	/* Outgoing interface name, same masked compare. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		/* Walk the extension-header chain to the upper-layer header;
		 * also reports the fragment offset if fragmented. */
		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
		if (protohdr < 0) {
			/* Header walk failed: hotdrop the packet unless the
			 * failure is due to a non-first fragment (non-zero
			 * fragment offset), which simply cannot match. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
		    !(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
164
165 /* should be ip6 safe */
166 static bool
167 ip6_checkentry(const struct ip6t_ip6 *ipv6)
168 {
169 if (ipv6->flags & ~IP6T_F_MASK) {
170 duprintf("Unknown flag bits set: %08X\n",
171 ipv6->flags & ~IP6T_F_MASK);
172 return false;
173 }
174 if (ipv6->invflags & ~IP6T_INV_MASK) {
175 duprintf("Unknown invflag bits set: %08X\n",
176 ipv6->invflags & ~IP6T_INV_MASK);
177 return false;
178 }
179 return true;
180 }
181
/*
 * Target handler for the built-in ERROR target: rate-limited log of the
 * error message carried in the target data, then drop the packet.
 */
static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
189
/* Return the rule entry located @offset bytes into the rule blob @base. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
195
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ip6t_entry *e)
{
	static const struct ip6t_ip6 uncond;

	/* A rule is unconditional iff it carries no matches (the target
	 * starts immediately after the entry header) AND its ip6t_ip6
	 * portion is entirely zero (no addresses, interfaces, proto, flags). */
	return e->target_offset == sizeof(struct ip6t_entry) &&
	       memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
}
205
206
/* const-correct accessor for a rule's target; casts away const only to
 * reuse the non-const ip6t_get_target() helper, never to modify. */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
212
213 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
/* This cries for unification! */
/* Netfilter hook number -> built-in chain name, for TRACE log lines. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING] = "PREROUTING",
	[NF_INET_LOCAL_IN] = "INPUT",
	[NF_INET_FORWARD] = "FORWARD",
	[NF_INET_LOCAL_OUT] = "OUTPUT",
	[NF_INET_POST_ROUTING] = "POSTROUTING",
};

/* How the matched position inside a chain is described in a TRACE line. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE] = "rule",
	[NF_IP6_TRACE_COMMENT_RETURN] = "return",
	[NF_IP6_TRACE_COMMENT_POLICY] = "policy",
};

/* Fixed log parameters used for all TRACE output (level 4 = warning). */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
244
/* Mildly perf critical (only if packet tracing is on) */
/*
 * Walk helper for trace_packet(): called for each entry @s from the hook
 * start until the matched entry @e is reached.  Tracks the current chain
 * name (user chains start with an ERROR target carrying the name) and the
 * rule number within that chain.  Returns 1 once @e has been reached and
 * *chainname/*comment/*rulenum describe it, 0 to keep walking.
 */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (unconditional(s) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
275
/*
 * Emit a "TRACE: table:chain:comment:rulenum" log line for a packet that
 * matched entry @e while traversing @hook of @tablename.  Walks the rules
 * from the hook entry point to recover the chain name and rule number.
 */
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;
	struct net *net = dev_net(in ? in : out);

	/* Use this CPU's private copy of the rule blob. */
	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
306 #endif
307
/* Advance to the rule that follows @entry in the packed rule blob. */
static inline __pure struct ip6t_entry *
ip6t_next_entry(const struct ip6t_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
313
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Main rule-traversal engine: runs the packet through @table at @hook and
 * returns the resulting verdict.  Uses this CPU's private copy of the rule
 * blob and a per-CPU jump stack for chain calls; counters are updated under
 * the xt_write_recseq sequence so readers can take consistent snapshots.
 */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.hotdrop = false;
	acpar.in = in;
	acpar.out = out;
	acpar.family = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	/* Enter the per-CPU counter write section (no preemption/softirq). */
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	cpu = smp_processor_id();
	table_base = private->entries[cpu];
	jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
	stackptr = per_cpu_ptr(private->stackptr, cpu);
	/* Remember stack depth on entry so re-entrant invocations unwind
	 * only their own frames. */
	origptr = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		acpar.thoff = 0;
		/* IPv6 header portion first; sets thoff/fragoff on match. */
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* Then every extension match attached to the rule. */
		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					/* Absolute verdict, encoded as -v-1. */
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				/* RETURN: pop the calling rule, or fall to
				 * the hook's underflow (policy) if the stack
				 * is already at entry depth. */
				if (*stackptr <= origptr)
					e = get_entry(table_base,
					    private->underflow[hook]);
				else
					e = ip6t_next_entry(jumpstack[--*stackptr]);
				continue;
			}
			/* Jump to another chain: push a return frame unless
			 * this is a tail-jump to the next rule or a GOTO. */
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Extension target: invoke it and act on its verdict. */
		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);

	/* Restore the stack depth we entered with. */
	*stackptr = origptr;

	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
447
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops. Puts hook bitmask in comefrom. */
/*
 * Depth-first walk of the rule graph from each valid hook entry point.
 * Rules are visited without recursion: the back pointer to the calling
 * rule is stashed in e->counters.pcnt (restored to 0 on the way out), and
 * bit NF_INET_NUMHOOKS of e->comefrom marks "on the current path" so a
 * revisit of an on-path rule is detected as a loop.
 */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Still marked on-path from an earlier step: loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				/* Negative verdicts must be within range. */
				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					/* Leaving this rule: clear on-path bit. */
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					/* Follow and clear the saved back ptr. */
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				if (pos + size >= newinfo->size)
					return 0;
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					/* Jump target must leave room for at
					 * least one full entry. */
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
					if (newpos >= newinfo->size)
						return 0;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
562
563 static void cleanup_match(struct xt_entry_match *m, struct net *net)
564 {
565 struct xt_mtdtor_param par;
566
567 par.net = net;
568 par.match = m->u.kernel.match;
569 par.matchinfo = m->data;
570 par.family = NFPROTO_IPV6;
571 if (par.match->destroy != NULL)
572 par.match->destroy(&par);
573 module_put(par.match->me);
574 }
575
576 static int
577 check_entry(const struct ip6t_entry *e)
578 {
579 const struct xt_entry_target *t;
580
581 if (!ip6_checkentry(&e->ipv6))
582 return -EINVAL;
583
584 if (e->target_offset + sizeof(struct xt_entry_target) >
585 e->next_offset)
586 return -EINVAL;
587
588 t = ip6t_get_target_c(e);
589 if (e->target_offset + t->u.target_size > e->next_offset)
590 return -EINVAL;
591
592 return 0;
593 }
594
595 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
596 {
597 const struct ip6t_ip6 *ipv6 = par->entryinfo;
598 int ret;
599
600 par->match = m->u.kernel.match;
601 par->matchinfo = m->data;
602
603 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
604 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
605 if (ret < 0) {
606 duprintf("ip_tables: check failed for `%s'.\n",
607 par.match->name);
608 return ret;
609 }
610 return 0;
611 }
612
613 static int
614 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
615 {
616 struct xt_match *match;
617 int ret;
618
619 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
620 m->u.user.revision);
621 if (IS_ERR(match)) {
622 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
623 return PTR_ERR(match);
624 }
625 m->u.kernel.match = match;
626
627 ret = check_match(m, par);
628 if (ret)
629 goto err;
630
631 return 0;
632 err:
633 module_put(m->u.kernel.match->me);
634 return ret;
635 }
636
637 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
638 {
639 struct xt_entry_target *t = ip6t_get_target(e);
640 struct xt_tgchk_param par = {
641 .net = net,
642 .table = name,
643 .entryinfo = e,
644 .target = t->u.kernel.target,
645 .targinfo = t->data,
646 .hook_mask = e->comefrom,
647 .family = NFPROTO_IPV6,
648 };
649 int ret;
650
651 t = ip6t_get_target(e);
652 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
653 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
654 if (ret < 0) {
655 duprintf("ip_tables: check failed for `%s'.\n",
656 t->u.kernel.target->name);
657 return ret;
658 }
659 return 0;
660 }
661
/*
 * Fully validate one rule: look up and check every match, then look up
 * and check the target.  On failure, matches checked so far (counted in
 * @j) are cleaned up again so no module references leak.
 */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	/* j counts successfully checked matches, for partial rollback. */
	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	/* Undo only the first j matches that passed find_check_match(). */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
710
711 static bool check_underflow(const struct ip6t_entry *e)
712 {
713 const struct xt_entry_target *t;
714 unsigned int verdict;
715
716 if (!unconditional(e))
717 return false;
718 t = ip6t_get_target_c(e);
719 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
720 return false;
721 verdict = ((struct xt_standard_target *)t)->verdict;
722 verdict = -verdict - 1;
723 return verdict == NF_DROP || verdict == NF_ACCEPT;
724 }
725
/*
 * Per-entry pass of translate_table(): verify the entry's alignment and
 * that it lies wholly within [base, limit), verify minimum size, run the
 * structural checks, and record hook entry / underflow offsets that land
 * exactly on this entry.  Also zeroes counters and comefrom for the later
 * graph walk.
 */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	/* Alignment plus bounds: the fixed header and the whole declared
	 * entry (next_offset) must fit before @limit. */
	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* Entry must at least hold its header plus a minimal target. */
	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	err = check_entry(e);

	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			/* Underflows must be valid chain policies. */
			if (!check_underflow(e)) {
				pr_debug("Underflows must be unconditional and "
					 "use the STANDARD target with "
					 "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
779
780 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
781 {
782 struct xt_tgdtor_param par;
783 struct xt_entry_target *t;
784 struct xt_entry_match *ematch;
785
786 /* Cleanup all matches */
787 xt_ematch_foreach(ematch, e)
788 cleanup_match(ematch, net);
789 t = ip6t_get_target(e);
790
791 par.net = net;
792 par.target = t->u.kernel.target;
793 par.targinfo = t->data;
794 par.family = NFPROTO_IPV6;
795 if (par.target->destroy != NULL)
796 par.target->destroy(&par);
797 module_put(par.target->me);
798 }
799
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/*
 * Full validation pipeline for a replacement table: per-entry size/hook
 * checks, hook assignment verification, loop detection via
 * mark_source_chains(), match/target checkentry with rollback on failure,
 * and finally replication of the verified blob to every other CPU's copy.
 */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		/* Each user chain head (ERROR target) deepens the worst-case
		 * jump stack by one. */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Roll back only the i entries that passed above. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
888
/*
 * Sum per-CPU rule counters into @counters (indexed by rule number).
 * Each per-rule read is retried under the CPU's xt_recseq seqcount so the
 * byte/packet pair is a consistent snapshot even against concurrent
 * writers in ip6t_do_table().
 */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			/* Retry until the pair is read without a concurrent
			 * writer having bumped the sequence. */
			do {
				start = read_seqcount_begin(s);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}
916
917 static struct xt_counters *alloc_counters(const struct xt_table *table)
918 {
919 unsigned int countersize;
920 struct xt_counters *counters;
921 const struct xt_table_info *private = table->private;
922
923 /* We need atomic snapshot of counters: rest doesn't change
924 (other than comefrom, which userspace doesn't care
925 about). */
926 countersize = sizeof(struct xt_counters) * private->number;
927 counters = vzalloc(countersize);
928
929 if (counters == NULL)
930 return ERR_PTR(-ENOMEM);
931
932 get_counters(private, counters);
933
934 return counters;
935 }
936
/*
 * Copy the table's rule blob to userspace, then patch it up in place:
 * overwrite each entry's counters with the summed per-CPU snapshot, and
 * restore the user-visible names of each match and target (the kernel
 * copy holds kernel pointers in those unions instead).
 */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Replace the raw per-CPU counters with the summed snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Walk the matches between the header and target_offset,
		 * rewriting each one's user-visible name. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Same fix-up for the target's name. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1010
1011 #ifdef CONFIG_COMPAT
1012 static void compat_standard_from_user(void *dst, const void *src)
1013 {
1014 int v = *(compat_int_t *)src;
1015
1016 if (v > 0)
1017 v += xt_compat_calc_jump(AF_INET6, v);
1018 memcpy(dst, &v, sizeof(v));
1019 }
1020
1021 static int compat_standard_to_user(void __user *dst, const void *src)
1022 {
1023 compat_int_t cv = *(int *)src;
1024
1025 if (cv > 0)
1026 cv -= xt_compat_calc_jump(AF_INET6, cv);
1027 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1028 }
1029
/*
 * Compute how much smaller one entry becomes in compat layout (entry
 * header plus every match and the target each shrink), record that delta
 * with xt_compat_add_offset(), shrink newinfo->size by it, and shift any
 * hook entry / underflow offsets that lie beyond this entry.
 */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Hooks located after this entry move down by @off. */
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1060
/*
 * Build the compat view of @info into @newinfo: copy the metadata, then
 * walk every entry accumulating the size/offset adjustments via
 * compat_calc_entry().  Caller must hold the compat lock so the offset
 * table built here stays valid.
 */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ip6t_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_compat_init_offsets(AF_INET6, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
1083 #endif
1084
/*
 * IP6T_SO_GET_INFO handler: look up the named table (auto-loading its
 * module if necessary) and copy its hook offsets, entry count and size to
 * userspace.  In compat mode the offsets/size are first translated to the
 * compat layout under the compat lock.
 */
static int get_info(struct net *net, void __user *user,
                    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Defensively terminate the userspace-supplied name. */
	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET6);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			/* Report compat-layout sizes/offsets instead. */
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET6);
#endif
	return ret;
}
1145
/*
 * IP6T_SO_GET_ENTRIES handler: validate the user-supplied lengths, look
 * up the table, and copy the rule blob out if the size the caller expects
 * matches the current table size (-EAGAIN otherwise, e.g. after a
 * concurrent replace changed the table).
 */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	/* Total length must be header plus exactly the claimed blob size. */
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1185
/* Core of table replacement, shared by the native and compat paths.
 * @newinfo must already be fully translated/validated.  On success the
 * old table's counters are copied out to @counters_ptr (userspace) and
 * the old entries are cleaned up and freed.  On failure @newinfo is
 * left intact for the caller to dispose of.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;
	struct ip6t_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	/* May demand-load the ip6table_<name> module. */
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	/* Atomically swap the new blob in; returns the old one. */
	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules:
	 * the table module holds a self-reference while it has
	 * user-added rules, so drop once (or twice) as the old and
	 * new rule counts cross the initial_entries threshold. */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1260
1261 static int
1262 do_replace(struct net *net, const void __user *user, unsigned int len)
1263 {
1264 int ret;
1265 struct ip6t_replace tmp;
1266 struct xt_table_info *newinfo;
1267 void *loc_cpu_entry;
1268 struct ip6t_entry *iter;
1269
1270 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1271 return -EFAULT;
1272
1273 /* overflow check */
1274 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1275 return -ENOMEM;
1276 if (tmp.num_counters == 0)
1277 return -EINVAL;
1278
1279 tmp.name[sizeof(tmp.name)-1] = 0;
1280
1281 newinfo = xt_alloc_table_info(tmp.size);
1282 if (!newinfo)
1283 return -ENOMEM;
1284
1285 /* choose the copy that is on our node/cpu */
1286 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1287 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1288 tmp.size) != 0) {
1289 ret = -EFAULT;
1290 goto free_newinfo;
1291 }
1292
1293 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1294 if (ret != 0)
1295 goto free_newinfo;
1296
1297 duprintf("ip_tables: Translated table\n");
1298
1299 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1300 tmp.num_counters, tmp.counters);
1301 if (ret)
1302 goto free_newinfo_untrans;
1303 return 0;
1304
1305 free_newinfo_untrans:
1306 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1307 cleanup_entry(iter, net);
1308 free_newinfo:
1309 xt_free_table_info(newinfo);
1310 return ret;
1311 }
1312
/* IP6T_SO_SET_ADD_COUNTERS handler: add user-supplied byte/packet
 * counts to every rule of the named table.  @compat selects the 32-bit
 * layout of struct xt_counters_info.
 */
static int
do_add_counters(struct net *net, const void __user *user, unsigned int len,
		int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	const void *loc_cpu_entry;
	struct ip6t_entry *iter;
	unsigned int addend;

	/* Validates len against tmp.num_counters and returns a kernel
	 * copy of the counter array (vmalloc'ed). */
	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);
	t = xt_find_table_lock(net, AF_INET6, tmp.name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}


	local_bh_disable();
	private = t->private;
	/* The counter array must match the rule count exactly. */
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	/* Writer side of the per-cpu recursive seqcount; lets counter
	 * readers detect concurrent updates. */
	addend = xt_write_recseq_begin();
	loc_cpu_entry = private->entries[curcpu];
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);

 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
1364
1365 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace, as received from a
 * compat IP6T_SO_SET_REPLACE call.  Field order and meaning match the
 * native structure; only the counters pointer (compat_uptr_t) and the
 * trailing rule blob (struct compat_ip6t_entry) differ in size.
 * This is ABI — do not reorder or resize fields.
 */
struct compat_ip6t_replace {
	char name[XT_TABLE_MAXNAMELEN];
	u32 valid_hooks;
	u32 num_entries;
	u32 size;
	u32 hook_entry[NF_INET_NUMHOOKS];
	u32 underflow[NF_INET_NUMHOOKS];
	u32 num_counters;
	compat_uptr_t counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];
};
1377
/* Convert one native rule back to compat layout while copying it to
 * userspace at *dstptr, attaching the accumulated counters for rule @i
 * and rewriting target_offset/next_offset to their compat values.
 * *dstptr and *size are advanced/shrunk as the entry is emitted.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	/* Only the compat-sized header counts toward the output;
	 * matches/target are appended right after it. */
	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is how much the entry shrank so far. */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1416
1417 static int
1418 compat_find_calc_match(struct xt_entry_match *m,
1419 const struct ip6t_ip6 *ipv6,
1420 unsigned int hookmask,
1421 int *size)
1422 {
1423 struct xt_match *match;
1424
1425 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1426 m->u.user.revision);
1427 if (IS_ERR(match)) {
1428 duprintf("compat_check_calc_match: `%s' not found\n",
1429 m->u.user.name);
1430 return PTR_ERR(match);
1431 }
1432 m->u.kernel.match = match;
1433 *size += xt_compat_match_offset(match);
1434 return 0;
1435 }
1436
1437 static void compat_release_entry(struct compat_ip6t_entry *e)
1438 {
1439 struct xt_entry_target *t;
1440 struct xt_entry_match *ematch;
1441
1442 /* Cleanup all matches */
1443 xt_ematch_foreach(ematch, e)
1444 module_put(ematch->u.kernel.match->me);
1445 t = compat_ip6t_get_target(e);
1446 module_put(t->u.kernel.target->me);
1447 }
1448
/* Validate one compat-format rule in place: bounds-check it against the
 * blob limits, look up and pin its matches and target, and record the
 * kernel/compat size delta for this entry's offset so the blob can be
 * expanded to native layout later.  Returns 0 with match/target module
 * references held, or a negative errno with all references released.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	/* Entry must be aligned and lie entirely within the blob
	 * (next_offset is untrusted userspace data). */
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	/* Must at least hold the entry header plus a target header. */
	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e);

	if (ret)
		return ret;

	/* Running total of this entry's growth in native layout. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	/* Pin each match module; j counts successes for unwind. */
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, &e->ipv6, e->comefrom,
					     &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	/* Remember the delta for this offset so translation can
	 * relocate the entry later. */
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1524
/* Expand one already-checked compat entry into native layout at
 * *dstptr, adjusting *size by the per-entry growth and relocating any
 * hook entry points / underflows that lie beyond this entry.  Cannot
 * fail: all lookups were done in check_compat_entry_size_and_hooks().
 */
static void
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ip6t_entry *de;
	unsigned int origsize;
	int h;
	struct xt_entry_match *ematch;

	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e)
		xt_compat_match_from_user(ematch, dstptr, size);

	/* (origsize - *size) is negative growth: offsets shift by
	 * however much the header and matches expanded. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Any hook entry/underflow located after this entry moves by
	 * the same amount. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
}
1559
/* Convert a compat replace blob (*pentry0 / *pinfo) into a native
 * xt_table_info.  On success *pinfo/*pentry0 are replaced with the
 * native table and the compat info is freed; on failure all module
 * references taken during checking are dropped and the caller's blob is
 * left for it to free.
 */
static int
translate_compat_table(struct net *net,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       const struct compat_ip6t_replace *compatr)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_replace repl;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = compatr->size;
	info->number = compatr->num_entries;

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	/* Offset bookkeeping in the xt core is global per family, so it
	 * is serialized by the compat mutex. */
	xt_compat_lock(AF_INET6);
	xt_compat_init_offsets(AF_INET6, compatr->num_entries);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + compatr->size);
		if (ret != 0)
			goto out_unlock;
		/* j counts entries holding module refs, for unwind. */
		++j;
	}

	ret = -EINVAL;
	if (j != compatr->num_entries) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, compatr->num_entries);
		goto out_unlock;
	}

	ret = -ENOMEM;
	/* 'size' now includes the growth to native layout. */
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = compatr->num_entries;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = compatr->hook_entry[i];
		newinfo->underflow[i] = compatr->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = compatr->size;
	xt_entry_foreach(iter0, entry0, compatr->size)
		compat_copy_entry_from_user(iter0, &pos, &size,
					    newinfo, entry1);

	/* all module references in entry0 are now gone. */
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);

	memcpy(&repl, compatr, sizeof(*compatr));

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		repl.hook_entry[i] = newinfo->hook_entry[i];
		repl.underflow[i] = newinfo->underflow[i];
	}

	repl.num_counters = 0;
	repl.counters = NULL;
	repl.size = newinfo->size;
	/* Re-validate the expanded blob with the full native checker. */
	ret = translate_table(net, newinfo, entry1, &repl);
	if (ret)
		goto free_newinfo;

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	/* Release refs only for the first j entries that passed. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
}
1653
1654 static int
1655 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1656 {
1657 int ret;
1658 struct compat_ip6t_replace tmp;
1659 struct xt_table_info *newinfo;
1660 void *loc_cpu_entry;
1661 struct ip6t_entry *iter;
1662
1663 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1664 return -EFAULT;
1665
1666 /* overflow check */
1667 if (tmp.size >= INT_MAX / num_possible_cpus())
1668 return -ENOMEM;
1669 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1670 return -ENOMEM;
1671 if (tmp.num_counters == 0)
1672 return -EINVAL;
1673
1674 tmp.name[sizeof(tmp.name)-1] = 0;
1675
1676 newinfo = xt_alloc_table_info(tmp.size);
1677 if (!newinfo)
1678 return -ENOMEM;
1679
1680 /* choose the copy that is on our node/cpu */
1681 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1682 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1683 tmp.size) != 0) {
1684 ret = -EFAULT;
1685 goto free_newinfo;
1686 }
1687
1688 ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
1689 if (ret != 0)
1690 goto free_newinfo;
1691
1692 duprintf("compat_do_replace: Translated table\n");
1693
1694 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1695 tmp.num_counters, compat_ptr(tmp.counters));
1696 if (ret)
1697 goto free_newinfo_untrans;
1698 return 0;
1699
1700 free_newinfo_untrans:
1701 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1702 cleanup_entry(iter, net);
1703 free_newinfo:
1704 xt_free_table_info(newinfo);
1705 return ret;
1706 }
1707
1708 static int
1709 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1710 unsigned int len)
1711 {
1712 int ret;
1713
1714 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1715 return -EPERM;
1716
1717 switch (cmd) {
1718 case IP6T_SO_SET_REPLACE:
1719 ret = compat_do_replace(sock_net(sk), user, len);
1720 break;
1721
1722 case IP6T_SO_SET_ADD_COUNTERS:
1723 ret = do_add_counters(sock_net(sk), user, len, 1);
1724 break;
1725
1726 default:
1727 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1728 ret = -EINVAL;
1729 }
1730
1731 return ret;
1732 }
1733
/* 32-bit userspace layout of struct ip6t_get_entries for the compat
 * IP6T_SO_GET_ENTRIES call.  This is ABI — do not reorder or resize.
 */
struct compat_ip6t_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ip6t_entry entrytable[0];
};
1739
1740 static int
1741 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1742 void __user *userptr)
1743 {
1744 struct xt_counters *counters;
1745 const struct xt_table_info *private = table->private;
1746 void __user *pos;
1747 unsigned int size;
1748 int ret = 0;
1749 const void *loc_cpu_entry;
1750 unsigned int i = 0;
1751 struct ip6t_entry *iter;
1752
1753 counters = alloc_counters(table);
1754 if (IS_ERR(counters))
1755 return PTR_ERR(counters);
1756
1757 /* choose the copy that is on our node/cpu, ...
1758 * This choice is lazy (because current thread is
1759 * allowed to migrate to another cpu)
1760 */
1761 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1762 pos = userptr;
1763 size = total_size;
1764 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1765 ret = compat_copy_entry_to_user(iter, &pos,
1766 &size, counters, i++);
1767 if (ret != 0)
1768 break;
1769 }
1770
1771 vfree(counters);
1772 return ret;
1773 }
1774
1775 static int
1776 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1777 int *len)
1778 {
1779 int ret;
1780 struct compat_ip6t_get_entries get;
1781 struct xt_table *t;
1782
1783 if (*len < sizeof(get)) {
1784 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1785 return -EINVAL;
1786 }
1787
1788 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1789 return -EFAULT;
1790
1791 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1792 duprintf("compat_get_entries: %u != %zu\n",
1793 *len, sizeof(get) + get.size);
1794 return -EINVAL;
1795 }
1796
1797 xt_compat_lock(AF_INET6);
1798 t = xt_find_table_lock(net, AF_INET6, get.name);
1799 if (!IS_ERR_OR_NULL(t)) {
1800 const struct xt_table_info *private = t->private;
1801 struct xt_table_info info;
1802 duprintf("t->private->number = %u\n", private->number);
1803 ret = compat_table_info(private, &info);
1804 if (!ret && get.size == info.size) {
1805 ret = compat_copy_entries_to_user(private->size,
1806 t, uptr->entrytable);
1807 } else if (!ret) {
1808 duprintf("compat_get_entries: I've got %u not %u!\n",
1809 private->size, get.size);
1810 ret = -EAGAIN;
1811 }
1812 xt_compat_flush_offsets(AF_INET6);
1813 module_put(t->me);
1814 xt_table_unlock(t);
1815 } else
1816 ret = t ? PTR_ERR(t) : -ENOENT;
1817
1818 xt_compat_unlock(AF_INET6);
1819 return ret;
1820 }
1821
1822 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1823
1824 static int
1825 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1826 {
1827 int ret;
1828
1829 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1830 return -EPERM;
1831
1832 switch (cmd) {
1833 case IP6T_SO_GET_INFO:
1834 ret = get_info(sock_net(sk), user, len, 1);
1835 break;
1836 case IP6T_SO_GET_ENTRIES:
1837 ret = compat_get_entries(sock_net(sk), user, len);
1838 break;
1839 default:
1840 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1841 }
1842 return ret;
1843 }
1844 #endif
1845
1846 static int
1847 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1848 {
1849 int ret;
1850
1851 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1852 return -EPERM;
1853
1854 switch (cmd) {
1855 case IP6T_SO_SET_REPLACE:
1856 ret = do_replace(sock_net(sk), user, len);
1857 break;
1858
1859 case IP6T_SO_SET_ADD_COUNTERS:
1860 ret = do_add_counters(sock_net(sk), user, len, 0);
1861 break;
1862
1863 default:
1864 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1865 ret = -EINVAL;
1866 }
1867
1868 return ret;
1869 }
1870
1871 static int
1872 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1873 {
1874 int ret;
1875
1876 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1877 return -EPERM;
1878
1879 switch (cmd) {
1880 case IP6T_SO_GET_INFO:
1881 ret = get_info(sock_net(sk), user, len, 0);
1882 break;
1883
1884 case IP6T_SO_GET_ENTRIES:
1885 ret = get_entries(sock_net(sk), user, len);
1886 break;
1887
1888 case IP6T_SO_GET_REVISION_MATCH:
1889 case IP6T_SO_GET_REVISION_TARGET: {
1890 struct xt_get_revision rev;
1891 int target;
1892
1893 if (*len != sizeof(rev)) {
1894 ret = -EINVAL;
1895 break;
1896 }
1897 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
1898 ret = -EFAULT;
1899 break;
1900 }
1901 rev.name[sizeof(rev.name)-1] = 0;
1902
1903 if (cmd == IP6T_SO_GET_REVISION_TARGET)
1904 target = 1;
1905 else
1906 target = 0;
1907
1908 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
1909 rev.revision,
1910 target, &ret),
1911 "ip6t_%s", rev.name);
1912 break;
1913 }
1914
1915 default:
1916 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
1917 ret = -EINVAL;
1918 }
1919
1920 return ret;
1921 }
1922
1923 struct xt_table *ip6t_register_table(struct net *net,
1924 const struct xt_table *table,
1925 const struct ip6t_replace *repl)
1926 {
1927 int ret;
1928 struct xt_table_info *newinfo;
1929 struct xt_table_info bootstrap = {0};
1930 void *loc_cpu_entry;
1931 struct xt_table *new_table;
1932
1933 newinfo = xt_alloc_table_info(repl->size);
1934 if (!newinfo) {
1935 ret = -ENOMEM;
1936 goto out;
1937 }
1938
1939 /* choose the copy on our node/cpu, but dont care about preemption */
1940 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1941 memcpy(loc_cpu_entry, repl->entries, repl->size);
1942
1943 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
1944 if (ret != 0)
1945 goto out_free;
1946
1947 new_table = xt_register_table(net, table, &bootstrap, newinfo);
1948 if (IS_ERR(new_table)) {
1949 ret = PTR_ERR(new_table);
1950 goto out_free;
1951 }
1952 return new_table;
1953
1954 out_free:
1955 xt_free_table_info(newinfo);
1956 out:
1957 return ERR_PTR(ret);
1958 }
1959
1960 void ip6t_unregister_table(struct net *net, struct xt_table *table)
1961 {
1962 struct xt_table_info *private;
1963 void *loc_cpu_entry;
1964 struct module *table_owner = table->me;
1965 struct ip6t_entry *iter;
1966
1967 private = xt_unregister_table(table);
1968
1969 /* Decrease module usage counts and free resources */
1970 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1971 xt_entry_foreach(iter, loc_cpu_entry, private->size)
1972 cleanup_entry(iter, net);
1973 if (private->number > private->initial_entries)
1974 module_put(table_owner);
1975 xt_free_table_info(private);
1976 }
1977
/* Returns true when @type equals @test_type and @code falls inside
 * [@min_code, @max_code]; @invert flips the result.
 */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool hit = (type == test_type) &&
		   (code >= min_code) && (code <= max_code);

	return hit != invert;
}
1987
1988 static bool
1989 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
1990 {
1991 const struct icmp6hdr *ic;
1992 struct icmp6hdr _icmph;
1993 const struct ip6t_icmp *icmpinfo = par->matchinfo;
1994
1995 /* Must not be a fragment. */
1996 if (par->fragoff != 0)
1997 return false;
1998
1999 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2000 if (ic == NULL) {
2001 /* We've been asked to examine this packet, and we
2002 * can't. Hence, no choice but to drop.
2003 */
2004 duprintf("Dropping evil ICMP tinygram.\n");
2005 par->hotdrop = true;
2006 return false;
2007 }
2008
2009 return icmp6_type_code_match(icmpinfo->type,
2010 icmpinfo->code[0],
2011 icmpinfo->code[1],
2012 ic->icmp6_type, ic->icmp6_code,
2013 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2014 }
2015
2016 /* Called when user tries to insert an entry of this type. */
2017 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2018 {
2019 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2020
2021 /* Must specify no unknown invflags */
2022 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2023 }
2024
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		/* Standard verdict target; note it registers no .target
		 * function — its payload is just the verdict int. */
		.name = XT_STANDARD_TARGET,
		.targetsize = sizeof(int),
		.family = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		/* Verdict ints need 32<->64 bit translation. */
		.compatsize = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user = compat_standard_to_user,
#endif
	},
	{
		/* ERROR target: hitting it at runtime calls ip6t_error. */
		.name = XT_ERROR_TARGET,
		.target = ip6t_error,
		.targetsize = XT_FUNCTION_MAXNAMELEN,
		.family = NFPROTO_IPV6,
	},
};
2044
/* Socket-option registration: routes IP6T_SO_SET_*/IP6T_SO_GET_* calls
 * on PF_INET6 sockets to the handlers above, with compat variants for
 * 32-bit userspace under CONFIG_COMPAT.
 */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf = PF_INET6,
	.set_optmin = IP6T_BASE_CTL,
	.set_optmax = IP6T_SO_SET_MAX+1,
	.set = do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set = compat_do_ip6t_set_ctl,
#endif
	.get_optmin = IP6T_BASE_CTL,
	.get_optmax = IP6T_SO_GET_MAX+1,
	.get = do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get = compat_do_ip6t_get_ctl,
#endif
	.owner = THIS_MODULE,
};
2061
/* The built-in "icmp6" match (type/code range matching on ICMPv6). */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name = "icmp6",
		.match = icmp6_match,
		.matchsize = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto = IPPROTO_ICMPV6,
		.family = NFPROTO_IPV6,
	},
};
2072
/* Per-netns setup: initialize the IPv6 xtables state for @net. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
2077
/* Per-netns teardown: release the IPv6 xtables state for @net. */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
2082
/* Hooks run for every network namespace as it is created/destroyed. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2087
2088 static int __init ip6_tables_init(void)
2089 {
2090 int ret;
2091
2092 ret = register_pernet_subsys(&ip6_tables_net_ops);
2093 if (ret < 0)
2094 goto err1;
2095
2096 /* No one else will be downing sem now, so we won't sleep */
2097 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2098 if (ret < 0)
2099 goto err2;
2100 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2101 if (ret < 0)
2102 goto err4;
2103
2104 /* Register setsockopt */
2105 ret = nf_register_sockopt(&ip6t_sockopts);
2106 if (ret < 0)
2107 goto err5;
2108
2109 pr_info("(C) 2000-2006 Netfilter Core Team\n");
2110 return 0;
2111
2112 err5:
2113 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2114 err4:
2115 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2116 err2:
2117 unregister_pernet_subsys(&ip6_tables_net_ops);
2118 err1:
2119 return ret;
2120 }
2121
/* Module exit: tear down in strict reverse order of ip6_tables_init()
 * — remove the sockopt interface first so no new control calls can
 * reach the targets/matches while they are being unregistered.
 */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2130
/* Exported for the per-table modules (ip6table_filter, ip6table_mangle,
 * ip6table_raw, ...) that register their tables through this core. */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);