/*
 * Linux 3.10.103
 * [GitHub/mt8127/android_kernel_alcatel_ttab.git] / net/ipv6/netfilter/ip6_tables.c
 */
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/capability.h>
14 #include <linux/in.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/poison.h>
21 #include <linux/icmpv6.h>
22 #include <net/ipv6.h>
23 #include <net/compat.h>
24 #include <asm/uaccess.h>
25 #include <linux/mutex.h>
26 #include <linux/proc_fs.h>
27 #include <linux/err.h>
28 #include <linux/cpumask.h>
29
30 #include <linux/netfilter_ipv6/ip6_tables.h>
31 #include <linux/netfilter/x_tables.h>
32 #include <net/netfilter/nf_log.h>
33 #include "../../netfilter/xt_repldata.h"
34
35 MODULE_LICENSE("GPL");
36 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
37 MODULE_DESCRIPTION("IPv6 packet filter");
38
39 /*#define DEBUG_IP_FIREWALL*/
40 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
41 /*#define DEBUG_IP_FIREWALL_USER*/
42
43 #ifdef DEBUG_IP_FIREWALL
44 #define dprintf(format, args...) pr_info(format , ## args)
45 #else
46 #define dprintf(format, args...)
47 #endif
48
49 #ifdef DEBUG_IP_FIREWALL_USER
50 #define duprintf(format, args...) pr_info(format , ## args)
51 #else
52 #define duprintf(format, args...)
53 #endif
54
55 #ifdef CONFIG_NETFILTER_DEBUG
56 #define IP_NF_ASSERT(x) WARN_ON(!(x))
57 #else
58 #define IP_NF_ASSERT(x)
59 #endif
60
61 #if 0
62 /* All the better to debug you with... */
63 #define static
64 #define inline
65 #endif
66
/*
 * Allocate and fill in the initial (boot-time) replacement blob for a
 * built-in table.  Thin wrapper around the xt_alloc_initial_table()
 * macro, which token-pastes the ip6t/IP6T repldata template types.
 */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
72
73 /*
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
79
80 Hence the start of any table is given by get_table() below. */
81
82 /* Returns whether matches rule or not. */
83 /* Performance critical - called for every packet */
/*
 * ip6_packet_match - check a packet against the IPv6/interface part of a rule.
 * @skb:     packet under inspection
 * @indev:   input device name ("" when none)
 * @outdev:  output device name ("" when none)
 * @ip6info: the ip6t_ip6 portion of the rule
 * @protoff: out - offset of the transport header located by ipv6_find_hdr()
 * @fragoff: out - fragment offset reported by ipv6_find_hdr()
 * @hotdrop: out - set true when the header chain is broken on a
 *           non-fragment, i.e. the packet must be dropped
 *
 * Returns true when the packet matches, honouring the IP6T_INV_* flags.
 */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XOR the raw comparison result with the rule's inversion bit. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Source/destination address, each masked before comparison. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Input interface name, with optional wildcard mask. */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	/* Output interface name, with optional wildcard mask. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
		if (protohdr < 0) {
			/* Broken header chain on a non-fragment: drop. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
			!(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
164
165 /* should be ip6 safe */
166 static bool
167 ip6_checkentry(const struct ip6t_ip6 *ipv6)
168 {
169 if (ipv6->flags & ~IP6T_F_MASK) {
170 duprintf("Unknown flag bits set: %08X\n",
171 ipv6->flags & ~IP6T_F_MASK);
172 return false;
173 }
174 if (ipv6->invflags & ~IP6T_INV_MASK) {
175 duprintf("Unknown invflag bits set: %08X\n",
176 ipv6->invflags & ~IP6T_INV_MASK);
177 return false;
178 }
179 return true;
180 }
181
/*
 * Target handler for the built-in ERROR target: log the embedded error
 * string (ratelimited) and drop the packet.
 */
static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
189
/* Rules live in one contiguous blob; an entry is just base + byte offset. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	const void *addr = base + offset;

	return (struct ip6t_entry *)addr;
}
195
196 /* All zeroes == unconditional rule. */
197 /* Mildly perf critical (only if packet tracing is on) */
198 static inline bool unconditional(const struct ip6t_entry *e)
199 {
200 static const struct ip6t_ip6 uncond;
201
202 return e->target_offset == sizeof(struct ip6t_entry) &&
203 memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
204 }
205
/* const-preserving wrapper around ip6t_get_target(). */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
211
212 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
213 /* This cries for unification! */
/* Human-readable chain names for the five netfilter hook points,
 * used when emitting TRACE log lines. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

/* Which kind of rule terminated the trace walk. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

/* Fixed log parameters for TRACE output (level 4, all log flags). */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
243
244 /* Mildly perf critical (only if packet tracing is on) */
/*
 * Walk helper for trace_packet(): called for each entry @s from the hook's
 * chain start, looking for the matched entry @e.
 *
 * ERROR targets head user-defined chains, so on one of those the chain
 * name is picked up and the rule counter restarts.  Returns 1 (stop the
 * walk) once @e is reached, with *chainname, *comment and *rulenum
 * describing it; 0 to keep walking.
 */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (unconditional(s) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0) {
			/* Tail of chains: STANDARD target (return/policy) */
			/* "policy" when still in the base chain, "return" in a user chain */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
274
/*
 * Emit a "TRACE: table:chain:comment:rulenum" log line for a traced packet
 * that matched entry @e on hook @hook.  Walks the chain from the hook's
 * entry point to recover the chain name and rule number of @e.
 */
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;
	struct net *net = dev_net(in ? in : out);

	/* This CPU's copy of the ruleset blob. */
	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
305 #endif
306
307 static inline __pure struct ip6t_entry *
308 ip6t_next_entry(const struct ip6t_entry *entry)
309 {
310 return (void *)entry + entry->next_offset;
311 }
312
313 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * ip6t_do_table - run a packet through one table on one hook.
 *
 * Walks this CPU's copy of the ruleset starting at the hook's entry
 * point.  Jumps to user chains are recorded on a per-CPU jumpstack so
 * RETURN can resume after the calling rule; falling off the end of the
 * base chain lands on the hook's underflow (policy) rule.  Runs with
 * BH disabled inside an xt_write_recseq section so counter updates can
 * be snapshotted consistently by get_counters().
 */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.hotdrop = false;
	acpar.in = in;
	acpar.out = out;
	acpar.family = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	cpu = smp_processor_id();
	table_base = private->entries[cpu];
	jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
	stackptr = per_cpu_ptr(private->stackptr, cpu);
	origptr = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		acpar.thoff = 0;
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* All extension matches on this rule must agree, too. */
		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					/* Absolute verdict encoded as -(verdict)-1. */
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				/* RETURN: pop, or fall to the hook's policy rule. */
				if (*stackptr <= origptr)
					e = get_entry(table_base,
					    private->underflow[hook]);
				else
					e = ip6t_next_entry(jumpstack[--*stackptr]);
				continue;
			}
			/* A real jump (not goto, not a no-op to the next rule):
			 * push the caller so RETURN can come back. */
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Extension target: let its handler decide the verdict. */
		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);

	*stackptr = origptr;

	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
446
447 /* Figures out from what hook each rule can be called: returns 0 if
448 there are loops. Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Bit NF_INET_NUMHOOKS marks "on the current walk
			 * path"; hitting it again means a rule loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				/* Verdicts below -NF_MAX_VERDICT-1 encode nothing valid. */
				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					/* Follow the back pointer saved in pcnt,
					 * clearing it as we unwind. */
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				if (pos + size >= newinfo->size)
					return 0;
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
							newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
					if (newpos >= newinfo->size)
						return 0;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				/* Save where we came from so the unwind above works. */
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
561
562 static void cleanup_match(struct xt_entry_match *m, struct net *net)
563 {
564 struct xt_mtdtor_param par;
565
566 par.net = net;
567 par.match = m->u.kernel.match;
568 par.matchinfo = m->data;
569 par.family = NFPROTO_IPV6;
570 if (par.match->destroy != NULL)
571 par.match->destroy(&par);
572 module_put(par.match->me);
573 }
574
575 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
576 {
577 const struct ip6t_ip6 *ipv6 = par->entryinfo;
578 int ret;
579
580 par->match = m->u.kernel.match;
581 par->matchinfo = m->data;
582
583 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
584 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
585 if (ret < 0) {
586 duprintf("ip_tables: check failed for `%s'.\n",
587 par.match->name);
588 return ret;
589 }
590 return 0;
591 }
592
593 static int
594 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
595 {
596 struct xt_match *match;
597 int ret;
598
599 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
600 m->u.user.revision);
601 if (IS_ERR(match)) {
602 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
603 return PTR_ERR(match);
604 }
605 m->u.kernel.match = match;
606
607 ret = check_match(m, par);
608 if (ret)
609 goto err;
610
611 return 0;
612 err:
613 module_put(m->u.kernel.match->me);
614 return ret;
615 }
616
617 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
618 {
619 struct xt_entry_target *t = ip6t_get_target(e);
620 struct xt_tgchk_param par = {
621 .net = net,
622 .table = name,
623 .entryinfo = e,
624 .target = t->u.kernel.target,
625 .targinfo = t->data,
626 .hook_mask = e->comefrom,
627 .family = NFPROTO_IPV6,
628 };
629 int ret;
630
631 t = ip6t_get_target(e);
632 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
633 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
634 if (ret < 0) {
635 duprintf("ip_tables: check failed for `%s'.\n",
636 t->u.kernel.target->name);
637 return ret;
638 }
639 return 0;
640 }
641
/*
 * Fully resolve and validate one rule: look up and check every match
 * extension, then the target extension.  On any failure, everything
 * already set up for this rule is torn down again.
 * @size is currently unused.
 */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	/* j counts matches successfully set up, for partial cleanup. */
	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	/* Undo only the j matches that were successfully initialized. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
690
691 static bool check_underflow(const struct ip6t_entry *e)
692 {
693 const struct xt_entry_target *t;
694 unsigned int verdict;
695
696 if (!unconditional(e))
697 return false;
698 t = ip6t_get_target_c(e);
699 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
700 return false;
701 verdict = ((struct xt_standard_target *)t)->verdict;
702 verdict = -verdict - 1;
703 return verdict == NF_DROP || verdict == NF_ACCEPT;
704 }
705
/*
 * Structural validation of one rule inside the user-supplied blob:
 * alignment, bounds against [base, limit), minimum size, flag sanity
 * and xt_check_entry_offsets().  Also records hook entry points and
 * underflows whose offsets match this entry, and resets the fields
 * (counters/comefrom) reused by later translation passes.
 * Returns 0 or a negative errno.
 */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	/* Misaligned, or the entry (header or whole body) runs past the blob. */
	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* Too small to hold even an entry header plus a target header. */
	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	if (!ip6_checkentry(&e->ipv6))
		return -EINVAL;

	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
				     e->next_offset);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_debug("Underflows must be unconditional and "
					 "use the STANDARD target with "
					 "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
762
763 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
764 {
765 struct xt_tgdtor_param par;
766 struct xt_entry_target *t;
767 struct xt_entry_match *ematch;
768
769 /* Cleanup all matches */
770 xt_ematch_foreach(ematch, e)
771 cleanup_match(ematch, net);
772 t = ip6t_get_target(e);
773
774 par.net = net;
775 par.target = t->u.kernel.target;
776 par.targinfo = t->data;
777 par.family = NFPROTO_IPV6;
778 if (par.target->destroy != NULL)
779 par.target->destroy(&par);
780 module_put(par.target->me);
781 }
782
783 /* Checks and translates the user-supplied table segment (held in
784 newinfo) */
/*
 * Validate the user-supplied ruleset blob in @entry0 and finish
 * populating @newinfo.  Passes, in order: per-entry structural checks
 * (offsets/hooks/underflows), entry count, hook-assignment check,
 * loop detection via mark_source_chains(), extension lookup/validation
 * via find_check_entry(), then replication of the blob to every other
 * CPU's copy.  Returns 0 or a negative errno (-ELOOP on rule loops).
 */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		/* Each user chain head (ERROR target) can add a jumpstack slot. */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Undo only the i entries that passed find_check_entry(). */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
871
/*
 * Sum per-CPU packet/byte counters for every rule in @t into the
 * caller-provided @counters array (one slot per rule, pre-zeroed).
 * Each per-rule read is retried under the CPU's xt_recseq seqcount so
 * a concurrent ip6t_do_table() update cannot yield a torn 64-bit pair.
 */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			/* Retry until the snapshot is consistent. */
			do {
				start = read_seqcount_begin(s);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}
899
900 static struct xt_counters *alloc_counters(const struct xt_table *table)
901 {
902 unsigned int countersize;
903 struct xt_counters *counters;
904 const struct xt_table_info *private = table->private;
905
906 /* We need atomic snapshot of counters: rest doesn't change
907 (other than comefrom, which userspace doesn't care
908 about). */
909 countersize = sizeof(struct xt_counters) * private->number;
910 counters = vzalloc(countersize);
911
912 if (counters == NULL)
913 return ERR_PTR(-ENOMEM);
914
915 get_counters(private, counters);
916
917 return counters;
918 }
919
/*
 * Copy the whole ruleset blob to userspace, then patch it up in place:
 * write the summed counters into each entry, and replace the kernel
 * match/target names with the user-visible ones (u.user.name), since
 * the blob stores kernel pointers in those unions.
 * Returns 0 or -EFAULT.
 */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite this entry's counters with the summed snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Rewrite each match's kernel name into the user union. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Same for the target name. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
993
994 #ifdef CONFIG_COMPAT
995 static void compat_standard_from_user(void *dst, const void *src)
996 {
997 int v = *(compat_int_t *)src;
998
999 if (v > 0)
1000 v += xt_compat_calc_jump(AF_INET6, v);
1001 memcpy(dst, &v, sizeof(v));
1002 }
1003
1004 static int compat_standard_to_user(void __user *dst, const void *src)
1005 {
1006 compat_int_t cv = *(int *)src;
1007
1008 if (cv > 0)
1009 cv -= xt_compat_calc_jump(AF_INET6, cv);
1010 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1011 }
1012
/*
 * Compute how much smaller entry @e becomes in the 32-bit compat
 * layout (entry header delta plus per-extension deltas), register the
 * (offset, delta) pair with the xt compat layer, shrink newinfo->size
 * accordingly, and shift any hook entry/underflow offsets that lie
 * after @e in the blob.  Returns 0 or a negative errno.
 */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	/* Entries located after @e move down by @off in the compat blob. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1043
/*
 * Build a compat-layout view of @info in @newinfo: copy the header
 * fields, then walk this CPU's blob adjusting size and hook offsets
 * for every entry via compat_calc_entry().  Returns 0 or an errno.
 */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ip6t_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_compat_init_offsets(AF_INET6, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
1066 #endif
1067
/*
 * IP6T_SO_GET_INFO handler: copy a table's hook entries, underflows,
 * entry count and size to userspace.  When @compat is set, sizes and
 * offsets are first converted to the 32-bit compat layout under the
 * compat lock.  May load the "ip6table_<name>" module on demand.
 */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Userspace string: force NUL termination before use. */
	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET6);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET6);
#endif
	return ret;
}
1128
/*
 * IP6T_SO_GET_ENTRIES handler: validate the requested size against the
 * live table and copy the full ruleset to userspace via
 * copy_entries_to_user().  Returns -EAGAIN when the caller's size is
 * stale (table changed since its get_info), or another negative errno.
 */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1168
/*
 * Core of table replacement, shared by the native and compat SET_REPLACE
 * paths.  Swaps @newinfo into the table @name, returns the displaced
 * table's counters through @counters_ptr and frees the old table.
 * On success @newinfo is live and must NOT be freed by the caller; on
 * failure the caller still owns @newinfo.  Returns 0 or negative errno.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;
	struct ip6t_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	/* Auto-load "ip6table_<name>" if the table is not registered yet. */
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* Drop the reference held for the user-added rules that just went
	 * away, and drop one more if the new set is back to built-ins only.
	 */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1243
/*
 * IP6T_SO_SET_REPLACE (native ABI): copy a complete replacement ruleset
 * from userspace, translate/verify it, then install it via __do_replace().
 * Returns 0 or negative errno.
 */
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ip6t_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	/* Don't trust userspace to terminate the table name. */
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	/* Validates offsets/hooks and takes match/target module refs. */
	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	/* Release the references translate_table() took. */
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1295
/*
 * IP6T_SO_SET_ADD_COUNTERS: add a userspace-supplied per-rule delta to
 * every rule's byte/packet counters.  @compat selects the 32-bit layout
 * of struct xt_counters_info.  The number of supplied counters must
 * match the table's current rule count exactly.
 */
static int
do_add_counters(struct net *net, const void __user *user, unsigned int len,
		int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	const void *loc_cpu_entry;
	struct ip6t_entry *iter;
	unsigned int addend;

	/* Validates len against num_counters and vmalloc's the array. */
	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);
	t = xt_find_table_lock(net, AF_INET6, tmp.name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}


	/* BH off + recseq: keep readers from seeing torn 64-bit updates. */
	local_bh_disable();
	private = t->private;
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	addend = xt_write_recseq_begin();
	loc_cpu_entry = private->entries[curcpu];
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);

 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
1347
#ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace (pointer fields are
 * compat_uptr_t), used by the compat SET_REPLACE path.
 */
struct compat_ip6t_replace {
	char name[XT_TABLE_MAXNAMELEN];
	u32 valid_hooks;
	u32 num_entries;
	u32 size;
	u32 hook_entry[NF_INET_NUMHOOKS];
	u32 underflow[NF_INET_NUMHOOKS];
	u32 num_counters;
	compat_uptr_t counters; /* struct xt_counters * */
	struct compat_ip6t_entry entries[0]; /* variable-length rule blob */
};
1360
/*
 * Convert one native entry @e (plus its counters[i]) to the 32-bit
 * layout at *dstptr.  Advances *dstptr and shrinks *size by the
 * native-vs-compat size difference; fixes up target/next offsets to the
 * compat values.  Returns 0 or negative errno.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* Header first; the matches/target are converted piecewise below. */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is the total shrinkage so far. */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1399
1400 static int
1401 compat_find_calc_match(struct xt_entry_match *m,
1402 const struct ip6t_ip6 *ipv6,
1403 unsigned int hookmask,
1404 int *size)
1405 {
1406 struct xt_match *match;
1407
1408 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1409 m->u.user.revision);
1410 if (IS_ERR(match)) {
1411 duprintf("compat_check_calc_match: `%s' not found\n",
1412 m->u.user.name);
1413 return PTR_ERR(match);
1414 }
1415 m->u.kernel.match = match;
1416 *size += xt_compat_match_offset(match);
1417 return 0;
1418 }
1419
/* Drop the match/target module references taken for a compat entry by
 * check_compat_entry_size_and_hooks().
 */
static void compat_release_entry(struct compat_ip6t_entry *e)
{
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ip6t_get_target(e);
	module_put(t->u.kernel.target->me);
}
1431
/*
 * First pass over one compat entry: validate alignment/bounds/offsets,
 * resolve all its matches and target (taking module references), and
 * record in *size and the xt compat offset table how much the entry
 * grows when converted to the native layout.  Returns 0 or negative
 * errno; on error all references taken so far are released.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	/* Entry must be aligned and lie fully inside the userspace blob;
	 * next_offset must not point past its end either.
	 */
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	if (!ip6_checkentry(&e->ipv6))
		return -EINVAL;

	/* Sanity-check target_offset/next_offset against match sizes. */
	ret = xt_compat_check_entry_offsets(e, e->elems,
					    e->target_offset, e->next_offset);
	if (ret)
		return ret;

	/* off accumulates how much this entry grows in native form. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;	/* number of matches successfully resolved (for unwind) */
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, &e->ipv6, e->comefrom,
					     &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	return 0;

 out:
	module_put(t->u.kernel.target->me);
 release_matches:
	/* Only release the first j matches actually resolved. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1509
/*
 * Second pass: expand one compat entry @e into the native layout at
 * *dstptr, converting matches and target in place and fixing up
 * target/next offsets plus newinfo's hook_entry/underflow positions for
 * the size growth.  Entries were already validated, so this cannot fail.
 */
static void
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ip6t_entry *de;
	unsigned int origsize;
	int h;
	struct xt_entry_match *ematch;

	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e)
		xt_compat_match_from_user(ematch, dstptr, size);

	/* (origsize - *size) is negative growth; offsets shift by it. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Any hook entry/underflow past this entry moves by the growth. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
}
1544
/*
 * Convert a full compat (32-bit) ruleset to the native layout.
 * Pass 1 (under xt_compat_lock): validate every entry and compute the
 * native blob size.  Pass 2: allocate the native table and expand each
 * entry into it, then run the normal translate_table() checks on the
 * result.  On success *pinfo/*pentry0 are replaced with the native
 * table and the compat one is freed.  Returns 0 or negative errno.
 */
static int
translate_compat_table(struct net *net,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       const struct compat_ip6t_replace *compatr)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_replace repl;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = compatr->size;
	info->number = compatr->num_entries;

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;	/* entries validated so far (for error unwind) */
	xt_compat_lock(AF_INET6);
	xt_compat_init_offsets(AF_INET6, compatr->num_entries);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + compatr->size);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != compatr->num_entries) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, compatr->num_entries);
		goto out_unlock;
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = compatr->num_entries;
	/* Start from the compat hook positions; pass 2 shifts them. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = compatr->hook_entry[i];
		newinfo->underflow[i] = compatr->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = compatr->size;
	xt_entry_foreach(iter0, entry0, compatr->size)
		compat_copy_entry_from_user(iter0, &pos, &size,
					    newinfo, entry1);

	/* all module references in entry0 are now gone. */
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);

	/* Build a native replace header so translate_table() can verify
	 * the expanded blob like a normal ruleset.
	 */
	memcpy(&repl, compatr, sizeof(*compatr));

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		repl.hook_entry[i] = newinfo->hook_entry[i];
		repl.underflow[i] = newinfo->underflow[i];
	}

	repl.num_counters = 0;
	repl.counters = NULL;
	repl.size = newinfo->size;
	ret = translate_table(net, newinfo, entry1, &repl);
	if (ret)
		goto free_newinfo;

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	/* Drop references for the j entries that passed validation. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
}
1638
/*
 * IP6T_SO_SET_REPLACE, compat (32-bit userspace) flavour: copy the
 * compat ruleset in, convert it to the native layout, then install it
 * via the shared __do_replace().  Returns 0 or negative errno.
 */
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ip6t_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	/* Don't trust userspace to terminate the table name. */
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	/* May replace newinfo/loc_cpu_entry with the expanded native blob. */
	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1692
1693 static int
1694 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1695 unsigned int len)
1696 {
1697 int ret;
1698
1699 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1700 return -EPERM;
1701
1702 switch (cmd) {
1703 case IP6T_SO_SET_REPLACE:
1704 ret = compat_do_replace(sock_net(sk), user, len);
1705 break;
1706
1707 case IP6T_SO_SET_ADD_COUNTERS:
1708 ret = do_add_counters(sock_net(sk), user, len, 1);
1709 break;
1710
1711 default:
1712 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1713 ret = -EINVAL;
1714 }
1715
1716 return ret;
1717 }
1718
/* 32-bit userspace layout of struct ip6t_get_entries for compat
 * IP6T_SO_GET_ENTRIES.
 */
struct compat_ip6t_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ip6t_entry entrytable[0]; /* variable-length blob */
};
1724
/*
 * Copy the whole ruleset of @table to 32-bit userspace at @userptr,
 * converting each entry with compat_copy_entry_to_user().  Counters are
 * snapshotted first via alloc_counters().  Returns 0 or negative errno.
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ip6t_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
1759
1760 static int
1761 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1762 int *len)
1763 {
1764 int ret;
1765 struct compat_ip6t_get_entries get;
1766 struct xt_table *t;
1767
1768 if (*len < sizeof(get)) {
1769 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1770 return -EINVAL;
1771 }
1772
1773 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1774 return -EFAULT;
1775
1776 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1777 duprintf("compat_get_entries: %u != %zu\n",
1778 *len, sizeof(get) + get.size);
1779 return -EINVAL;
1780 }
1781
1782 xt_compat_lock(AF_INET6);
1783 t = xt_find_table_lock(net, AF_INET6, get.name);
1784 if (!IS_ERR_OR_NULL(t)) {
1785 const struct xt_table_info *private = t->private;
1786 struct xt_table_info info;
1787 duprintf("t->private->number = %u\n", private->number);
1788 ret = compat_table_info(private, &info);
1789 if (!ret && get.size == info.size) {
1790 ret = compat_copy_entries_to_user(private->size,
1791 t, uptr->entrytable);
1792 } else if (!ret) {
1793 duprintf("compat_get_entries: I've got %u not %u!\n",
1794 private->size, get.size);
1795 ret = -EAGAIN;
1796 }
1797 xt_compat_flush_offsets(AF_INET6);
1798 module_put(t->me);
1799 xt_table_unlock(t);
1800 } else
1801 ret = t ? PTR_ERR(t) : -ENOENT;
1802
1803 xt_compat_unlock(AF_INET6);
1804 return ret;
1805 }
1806
1807 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1808
1809 static int
1810 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1811 {
1812 int ret;
1813
1814 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1815 return -EPERM;
1816
1817 switch (cmd) {
1818 case IP6T_SO_GET_INFO:
1819 ret = get_info(sock_net(sk), user, len, 1);
1820 break;
1821 case IP6T_SO_GET_ENTRIES:
1822 ret = compat_get_entries(sock_net(sk), user, len);
1823 break;
1824 default:
1825 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1826 }
1827 return ret;
1828 }
1829 #endif
1830
1831 static int
1832 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1833 {
1834 int ret;
1835
1836 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1837 return -EPERM;
1838
1839 switch (cmd) {
1840 case IP6T_SO_SET_REPLACE:
1841 ret = do_replace(sock_net(sk), user, len);
1842 break;
1843
1844 case IP6T_SO_SET_ADD_COUNTERS:
1845 ret = do_add_counters(sock_net(sk), user, len, 0);
1846 break;
1847
1848 default:
1849 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1850 ret = -EINVAL;
1851 }
1852
1853 return ret;
1854 }
1855
/*
 * Native getsockopt dispatcher for IP6T_SO_GET_* requests (table info,
 * ruleset dump, match/target revision queries).  Requires CAP_NET_ADMIN
 * in the socket's user namespace.
 */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;	/* 1 = target query, 0 = match query */

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* Don't trust userspace to terminate the name. */
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* Auto-load the extension module if not yet registered. */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
1907
/*
 * Register an ip6tables table for @net, seeding it with the built-in
 * ruleset @repl.  Returns the live xt_table or an ERR_PTR on failure.
 * Used by the ip6table_* modules (filter, mangle, raw, ...).
 */
struct xt_table *ip6t_register_table(struct net *net,
				     const struct xt_table *table,
				     const struct ip6t_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
1944
/*
 * Tear down a table registered with ip6t_register_table(): unregister
 * it, release every rule's match/target resources, drop the extra
 * module reference held for user-added rules, and free the table info.
 */
void ip6t_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ip6t_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	/* Reference taken when user rules were added beyond the built-ins. */
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
1962
/* Returns true if @type equals @test_type and @code lies in
 * [@min_code, @max_code], XOR-ed with @invert. 0/false otherwise.
 */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool in_range;

	in_range = (type == test_type) &&
		   (code >= min_code) &&
		   (code <= max_code);
	return in_range ^ invert;
}
1972
/*
 * Match callback for the built-in "icmp6" match: compare the packet's
 * ICMPv6 type/code against the rule's configured range.  Non-first
 * fragments never match; a packet too short to carry an ICMPv6 header
 * is hot-dropped.
 */
static bool
icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2000
2001 /* Called when user tries to insert an entry of this type. */
2002 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2003 {
2004 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2005
2006 /* Must specify no unknown invflags */
2007 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2008 }
2009
2010 /* The built-in targets: standard (NULL) and error. */
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		/* Standard verdicts (ACCEPT/DROP/jump): no target function,
		 * handled inline by ip6t_do_table().
		 */
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		/* Placed at the end of each chain; hit means ruleset bug. */
		.name             = XT_ERROR_TARGET,
		.target           = ip6t_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV6,
	},
};
2029
/* Socket-option registration: wires the IP6T_SO_SET_*/IP6T_SO_GET_*
 * ranges on PF_INET6 sockets to the dispatchers above, with separate
 * entry points for 32-bit (compat) userspace.
 */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2046
/* The built-in "icmp6" match (type/code ranges on ICMPv6 packets). */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp6",
		.match      = icmp6_match,
		.matchsize  = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto      = IPPROTO_ICMPV6,
		.family     = NFPROTO_IPV6,
	},
};
2057
/* Per-netns setup: register the IPv6 xtables state for a new netns. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}

/* Per-netns teardown counterpart of ip6_tables_net_init(). */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}

static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2072
2073 static int __init ip6_tables_init(void)
2074 {
2075 int ret;
2076
2077 ret = register_pernet_subsys(&ip6_tables_net_ops);
2078 if (ret < 0)
2079 goto err1;
2080
2081 /* No one else will be downing sem now, so we won't sleep */
2082 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2083 if (ret < 0)
2084 goto err2;
2085 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2086 if (ret < 0)
2087 goto err4;
2088
2089 /* Register setsockopt */
2090 ret = nf_register_sockopt(&ip6t_sockopts);
2091 if (ret < 0)
2092 goto err5;
2093
2094 pr_info("(C) 2000-2006 Netfilter Core Team\n");
2095 return 0;
2096
2097 err5:
2098 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2099 err4:
2100 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2101 err2:
2102 unregister_pernet_subsys(&ip6_tables_net_ops);
2103 err1:
2104 return ret;
2105 }
2106
/* Module exit: undo ip6_tables_init() in strict reverse order. */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2115
/* Exported for the ip6table_* table modules (filter, mangle, raw, ...). */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);