597f539f3d332364725db21c653b8c9ea7061fcb
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / ipv6 / netfilter / ip6_tables.c
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/capability.h>
14 #include <linux/in.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/poison.h>
21 #include <linux/icmpv6.h>
22 #include <net/ipv6.h>
23 #include <net/compat.h>
24 #include <asm/uaccess.h>
25 #include <linux/mutex.h>
26 #include <linux/proc_fs.h>
27 #include <linux/err.h>
28 #include <linux/cpumask.h>
29
30 #include <linux/netfilter_ipv6/ip6_tables.h>
31 #include <linux/netfilter/x_tables.h>
32 #include <net/netfilter/nf_log.h>
33 #include "../../netfilter/xt_repldata.h"
34
35 MODULE_LICENSE("GPL");
36 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
37 MODULE_DESCRIPTION("IPv6 packet filter");
38
39 /*#define DEBUG_IP_FIREWALL*/
40 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
41 /*#define DEBUG_IP_FIREWALL_USER*/
42
43 #ifdef DEBUG_IP_FIREWALL
44 #define dprintf(format, args...) pr_info(format , ## args)
45 #else
46 #define dprintf(format, args...)
47 #endif
48
49 #ifdef DEBUG_IP_FIREWALL_USER
50 #define duprintf(format, args...) pr_info(format , ## args)
51 #else
52 #define duprintf(format, args...)
53 #endif
54
55 #ifdef CONFIG_NETFILTER_DEBUG
56 #define IP_NF_ASSERT(x) WARN_ON(!(x))
57 #else
58 #define IP_NF_ASSERT(x)
59 #endif
60
61 #if 0
62 /* All the better to debug you with... */
63 #define static
64 #define inline
65 #endif
66
67 void *ip6t_alloc_initial_table(const struct xt_table *info)
68 {
69 return xt_alloc_initial_table(ip6t, IP6T);
70 }
71 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
72
73 /*
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
79
80 Hence the start of any table is given by get_table() below. */
81
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XOR the raw comparison result with the matching IP6T_INV_* bit, so a
 * set invert flag turns a match into a mismatch and vice versa. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Source/destination address check through the rule's netmasks. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Incoming interface check (masked compare, so "eth+"-style
	 * prefixes work). */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	/* Outgoing interface check. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		/* Walk the extension header chain; also reports the
		 * fragment offset and fills *protoff for the caller. */
		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
		if (protohdr < 0) {
			/* Header walk failed on a non-fragment: force a
			 * drop.  Later fragments simply cannot match. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
		    !(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
164
165 /* should be ip6 safe */
166 static bool
167 ip6_checkentry(const struct ip6t_ip6 *ipv6)
168 {
169 if (ipv6->flags & ~IP6T_F_MASK) {
170 duprintf("Unknown flag bits set: %08X\n",
171 ipv6->flags & ~IP6T_F_MASK);
172 return false;
173 }
174 if (ipv6->invflags & ~IP6T_INV_MASK) {
175 duprintf("Unknown invflag bits set: %08X\n",
176 ipv6->invflags & ~IP6T_INV_MASK);
177 return false;
178 }
179 return true;
180 }
181
182 static unsigned int
183 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
184 {
185 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
186
187 return NF_DROP;
188 }
189
/* Return the rule located 'offset' bytes into the ruleset blob. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	const void *addr = base + offset;

	return (struct ip6t_entry *)addr;
}
195
196 /* All zeroes == unconditional rule. */
197 /* Mildly perf critical (only if packet tracing is on) */
198 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
199 {
200 static const struct ip6t_ip6 uncond;
201
202 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
203 }
204
/* const-qualified wrapper around ip6t_get_target(). */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	struct ip6t_entry *entry = (struct ip6t_entry *)e;

	return ip6t_get_target(entry);
}
210
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
/* This cries for unification! */
/* Hook number -> built-in chain name, used in TRACE log lines. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

/* What kind of rule a traced packet hit (third field of TRACE output). */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

/* Log settings used for TRACE output: syslog level 4, all logflags on. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
242
/* Mildly perf critical (only if packet tracing is on) */
/*
 * Examine one rule 's' while scanning for rule 'e'.  Tracks the chain
 * currently being walked (*chainname, taken from the ERROR target that
 * heads each user-defined chain) and the rule number inside it
 * (*rulenum).  Returns 1 once 's' == 'e'; at that point *comment tells
 * whether the hit was a plain rule, a chain policy, or a return.
 */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ip6t_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			/* A chain whose name equals the hook name is the
			 * built-in chain, so this is its policy; otherwise
			 * it is an implicit return. */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
274
/*
 * Emit a "TRACE: table:chain:comment:rulenum" log line for a packet
 * marked with skb->nf_trace.  Walks the rules of the current hook to
 * locate entry 'e' and recover its chain name and rule number.
 * Called from ip6t_do_table() with bottom halves disabled.
 */
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;
	struct net *net = dev_net(in ? in : out);

	/* This CPU's copy of the ruleset. */
	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
306
307 static inline __pure struct ip6t_entry *
308 ip6t_next_entry(const struct ip6t_entry *entry)
309 {
310 return (void *)entry + entry->next_offset;
311 }
312
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Core rule evaluation loop: run the packet through the rules of
 * 'table' for this hook, following jumps (via the per-cpu jumpstack)
 * and GOTOs, until a rule or the hook's underflow policy produces a
 * verdict.
 */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.hotdrop = false;
	acpar.in      = in;
	acpar.out     = out;
	acpar.family  = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	/* Per-cpu ruleset and jumpstack; the write-side seqcount lets
	 * counter readers on other CPUs detect concurrent updates. */
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	cpu        = smp_processor_id();
	table_base = private->entries[cpu];
	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
	stackptr   = per_cpu_ptr(private->stackptr, cpu);
	origptr    = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		acpar.thoff = 0;
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* IP part matched; now run every extension match. */
		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					/* Absolute verdict (ACCEPT/DROP/...),
					 * encoded as -verdict - 1. */
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				/* RETURN: pop the jumpstack, or fall back to
				 * the hook's underflow policy when empty. */
				if (*stackptr <= origptr)
					e = get_entry(table_base,
					    private->underflow[hook]);
				else
					e = ip6t_next_entry(jumpstack[--*stackptr]);
				continue;
			}
			/* Jump (not GOTO, not a plain fallthrough): push the
			 * current rule so RETURN can come back to it. */
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Non-standard target: let its module decide. */
		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);

	*stackptr = origptr;

	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
446
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
/*
 * Iterative depth-first walk over the ruleset, once per valid hook.
 * Instead of recursing it stashes the back pointer of each visited
 * rule in e->counters.pcnt (restored to 0 on the way back out).
 * Bit (1 << hook) in e->comefrom records reachability; the extra bit
 * (1 << NF_INET_NUMHOOKS) marks "currently on the walk path", so
 * seeing it again means the ruleset contains a loop.
 */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 &&
			     unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				/* Negative verdicts are encoded as
				 * -verdict - 1; anything below the range of
				 * real verdicts is malformed. */
				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					/* Leave the walk path: clear the
					 * "on path" marker bit. */
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					/* Jump target must lie inside the
					 * blob with room for an entry. */
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
558
559 static void cleanup_match(struct xt_entry_match *m, struct net *net)
560 {
561 struct xt_mtdtor_param par;
562
563 par.net = net;
564 par.match = m->u.kernel.match;
565 par.matchinfo = m->data;
566 par.family = NFPROTO_IPV6;
567 if (par.match->destroy != NULL)
568 par.match->destroy(&par);
569 module_put(par.match->me);
570 }
571
572 static int
573 check_entry(const struct ip6t_entry *e)
574 {
575 const struct xt_entry_target *t;
576
577 if (!ip6_checkentry(&e->ipv6))
578 return -EINVAL;
579
580 if (e->target_offset + sizeof(struct xt_entry_target) >
581 e->next_offset)
582 return -EINVAL;
583
584 t = ip6t_get_target_c(e);
585 if (e->target_offset + t->u.target_size > e->next_offset)
586 return -EINVAL;
587
588 return 0;
589 }
590
591 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
592 {
593 const struct ip6t_ip6 *ipv6 = par->entryinfo;
594 int ret;
595
596 par->match = m->u.kernel.match;
597 par->matchinfo = m->data;
598
599 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
600 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
601 if (ret < 0) {
602 duprintf("ip_tables: check failed for `%s'.\n",
603 par.match->name);
604 return ret;
605 }
606 return 0;
607 }
608
609 static int
610 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
611 {
612 struct xt_match *match;
613 int ret;
614
615 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
616 m->u.user.revision);
617 if (IS_ERR(match)) {
618 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
619 return PTR_ERR(match);
620 }
621 m->u.kernel.match = match;
622
623 ret = check_match(m, par);
624 if (ret)
625 goto err;
626
627 return 0;
628 err:
629 module_put(m->u.kernel.match->me);
630 return ret;
631 }
632
633 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
634 {
635 struct xt_entry_target *t = ip6t_get_target(e);
636 struct xt_tgchk_param par = {
637 .net = net,
638 .table = name,
639 .entryinfo = e,
640 .target = t->u.kernel.target,
641 .targinfo = t->data,
642 .hook_mask = e->comefrom,
643 .family = NFPROTO_IPV6,
644 };
645 int ret;
646
647 t = ip6t_get_target(e);
648 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
649 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
650 if (ret < 0) {
651 duprintf("ip_tables: check failed for `%s'.\n",
652 t->u.kernel.target->name);
653 return ret;
654 }
655 return 0;
656 }
657
/*
 * Resolve and validate every match and the target of one rule.
 * On any failure, all match module references taken so far ('j' counts
 * them) and, past the target-check stage, the target reference are
 * released again before returning the error.
 */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	/* Resolve + check each match; j counts successful ones so the
	 * unwind below knows how many to clean up. */
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	/* Undo only the j matches that were successfully set up. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
706
707 static bool check_underflow(const struct ip6t_entry *e)
708 {
709 const struct xt_entry_target *t;
710 unsigned int verdict;
711
712 if (!unconditional(&e->ipv6))
713 return false;
714 t = ip6t_get_target_c(e);
715 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
716 return false;
717 verdict = ((struct xt_standard_target *)t)->verdict;
718 verdict = -verdict - 1;
719 return verdict == NF_DROP || verdict == NF_ACCEPT;
720 }
721
722 static int
723 check_entry_size_and_hooks(struct ip6t_entry *e,
724 struct xt_table_info *newinfo,
725 const unsigned char *base,
726 const unsigned char *limit,
727 const unsigned int *hook_entries,
728 const unsigned int *underflows,
729 unsigned int valid_hooks)
730 {
731 unsigned int h;
732 int err;
733
734 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
735 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
736 duprintf("Bad offset %p\n", e);
737 return -EINVAL;
738 }
739
740 if (e->next_offset
741 < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
742 duprintf("checking: element %p size %u\n",
743 e, e->next_offset);
744 return -EINVAL;
745 }
746
747 err = check_entry(e);
748 if (err)
749 return err;
750
751 /* Check hooks & underflows */
752 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
753 if (!(valid_hooks & (1 << h)))
754 continue;
755 if ((unsigned char *)e - base == hook_entries[h])
756 newinfo->hook_entry[h] = hook_entries[h];
757 if ((unsigned char *)e - base == underflows[h]) {
758 if (!check_underflow(e)) {
759 pr_err("Underflows must be unconditional and "
760 "use the STANDARD target with "
761 "ACCEPT/DROP\n");
762 return -EINVAL;
763 }
764 newinfo->underflow[h] = underflows[h];
765 }
766 }
767
768 /* Clear counters and comefrom */
769 e->counters = ((struct xt_counters) { 0, 0 });
770 e->comefrom = 0;
771 return 0;
772 }
773
774 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
775 {
776 struct xt_tgdtor_param par;
777 struct xt_entry_target *t;
778 struct xt_entry_match *ematch;
779
780 /* Cleanup all matches */
781 xt_ematch_foreach(ematch, e)
782 cleanup_match(ematch, net);
783 t = ip6t_get_target(e);
784
785 par.net = net;
786 par.target = t->u.kernel.target;
787 par.targinfo = t->data;
788 par.family = NFPROTO_IPV6;
789 if (par.target->destroy != NULL)
790 par.target->destroy(&par);
791 module_put(par.target->me);
792 }
793
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/*
 * Full validation pipeline for a new ruleset blob: per-entry size/hook
 * checks, hook coverage, loop detection (mark_source_chains), and
 * match/target resolution.  On success the validated blob is copied to
 * every other CPU's slot in newinfo->entries[].
 */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		/* Each user-defined chain starts with an ERROR target;
		 * counting them sizes the per-cpu jumpstack. */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Unwind only the i entries that were fully set up. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
882
/*
 * Sum every rule's per-cpu packet/byte counters into 'counters'
 * (indexed by rule number).  Each 64-bit counter pair is read under
 * the per-cpu xt_recseq sequence counter and retried if a writer
 * (packet path) updated it concurrently, so the pair stays consistent.
 */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			do {
				start = read_seqcount_begin(s);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}
910
911 static struct xt_counters *alloc_counters(const struct xt_table *table)
912 {
913 unsigned int countersize;
914 struct xt_counters *counters;
915 const struct xt_table_info *private = table->private;
916
917 /* We need atomic snapshot of counters: rest doesn't change
918 (other than comefrom, which userspace doesn't care
919 about). */
920 countersize = sizeof(struct xt_counters) * private->number;
921 counters = vzalloc(countersize);
922
923 if (counters == NULL)
924 return ERR_PTR(-ENOMEM);
925
926 get_counters(private, counters);
927
928 return counters;
929 }
930
/*
 * Copy the ruleset blob to userspace, then patch the userspace copy in
 * place: each rule's counters are replaced with the summed snapshot,
 * and the kernel-internal match/target pointers are overwritten with
 * the user-visible module names.
 */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite this entry's counters in the user copy with
		 * the summed snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Restore the user-visible name of every match ... */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* ... and of the target. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1004
1005 #ifdef CONFIG_COMPAT
1006 static void compat_standard_from_user(void *dst, const void *src)
1007 {
1008 int v = *(compat_int_t *)src;
1009
1010 if (v > 0)
1011 v += xt_compat_calc_jump(AF_INET6, v);
1012 memcpy(dst, &v, sizeof(v));
1013 }
1014
1015 static int compat_standard_to_user(void __user *dst, const void *src)
1016 {
1017 compat_int_t cv = *(int *)src;
1018
1019 if (cv > 0)
1020 cv -= xt_compat_calc_jump(AF_INET6, cv);
1021 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1022 }
1023
/*
 * Compute how much smaller entry 'e' is in the 32-bit (compat) layout,
 * register that delta with xt_compat_add_offset(), and shrink
 * newinfo->size plus any hook entry / underflow offsets that lie
 * beyond this entry.
 */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	/* Delta of the entry header itself, plus each match's and the
	 * target's own compat delta. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	/* Entries before a hook entry / underflow shrink its offset. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1054
/*
 * Build a compat (32-bit layout) view of 'info' in 'newinfo': same
 * metadata, but with size and hook offsets adjusted entry by entry.
 * newinfo->entries[] is deliberately not filled in.
 */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ip6t_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_compat_init_offsets(AF_INET6, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
1077 #endif
1078
/*
 * Getsockopt handler for table info: copy the named table's hook entry
 * points, underflows, entry count and blob size to userspace.  With
 * compat != 0 the reported size/offsets are first translated to the
 * 32-bit layout.
 */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Force NUL termination of the userspace-supplied name. */
	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET6);
#endif
	/* Auto-load the matching ip6table_<name> module if needed. */
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			/* Report the compat-adjusted view instead. */
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET6);
#endif
	return ret;
}
1139
/*
 * Getsockopt handler for the rule blob: copy the named table's entries
 * (with counters and user-visible names fixed up) to userspace.
 * get.size must exactly match the current blob size; otherwise -EAGAIN
 * tells the caller to re-fetch the table info and retry.
 */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	/* The buffer must be exactly header + claimed blob size. */
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1179
/* Swap the rules of table @name for @newinfo and copy the retired
 * ruleset's counters to @counters_ptr in userspace.  Shared backend of
 * the native and compat SO_SET_REPLACE handlers.  On failure @newinfo
 * is left untouched for the caller to dispose of.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;
	struct ip6t_entry *iter;

	ret = 0;
	/* Zeroed buffer that will receive the old table's counters. */
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	/* Find the table, autoloading "ip6table_<name>" if necessary. */
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	/* Atomically publish the new ruleset; returns the retired one. */
	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* A table pins its owner module while it holds more than its
	 * initial (builtin) entries (see ip6t_unregister_table); these
	 * two conditionals drop one or two references to match the
	 * old-table and new-table states.
	 */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1254
1255 static int
1256 do_replace(struct net *net, const void __user *user, unsigned int len)
1257 {
1258 int ret;
1259 struct ip6t_replace tmp;
1260 struct xt_table_info *newinfo;
1261 void *loc_cpu_entry;
1262 struct ip6t_entry *iter;
1263
1264 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1265 return -EFAULT;
1266
1267 /* overflow check */
1268 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1269 return -ENOMEM;
1270 tmp.name[sizeof(tmp.name)-1] = 0;
1271
1272 newinfo = xt_alloc_table_info(tmp.size);
1273 if (!newinfo)
1274 return -ENOMEM;
1275
1276 /* choose the copy that is on our node/cpu */
1277 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1278 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1279 tmp.size) != 0) {
1280 ret = -EFAULT;
1281 goto free_newinfo;
1282 }
1283
1284 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1285 if (ret != 0)
1286 goto free_newinfo;
1287
1288 duprintf("ip_tables: Translated table\n");
1289
1290 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1291 tmp.num_counters, tmp.counters);
1292 if (ret)
1293 goto free_newinfo_untrans;
1294 return 0;
1295
1296 free_newinfo_untrans:
1297 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1298 cleanup_entry(iter, net);
1299 free_newinfo:
1300 xt_free_table_info(newinfo);
1301 return ret;
1302 }
1303
1304 static int
1305 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1306 int compat)
1307 {
1308 unsigned int i, curcpu;
1309 struct xt_counters_info tmp;
1310 struct xt_counters *paddc;
1311 unsigned int num_counters;
1312 char *name;
1313 int size;
1314 void *ptmp;
1315 struct xt_table *t;
1316 const struct xt_table_info *private;
1317 int ret = 0;
1318 const void *loc_cpu_entry;
1319 struct ip6t_entry *iter;
1320 unsigned int addend;
1321 #ifdef CONFIG_COMPAT
1322 struct compat_xt_counters_info compat_tmp;
1323
1324 if (compat) {
1325 ptmp = &compat_tmp;
1326 size = sizeof(struct compat_xt_counters_info);
1327 } else
1328 #endif
1329 {
1330 ptmp = &tmp;
1331 size = sizeof(struct xt_counters_info);
1332 }
1333
1334 if (copy_from_user(ptmp, user, size) != 0)
1335 return -EFAULT;
1336
1337 #ifdef CONFIG_COMPAT
1338 if (compat) {
1339 num_counters = compat_tmp.num_counters;
1340 name = compat_tmp.name;
1341 } else
1342 #endif
1343 {
1344 num_counters = tmp.num_counters;
1345 name = tmp.name;
1346 }
1347
1348 if (len != size + num_counters * sizeof(struct xt_counters))
1349 return -EINVAL;
1350
1351 paddc = vmalloc(len - size);
1352 if (!paddc)
1353 return -ENOMEM;
1354
1355 if (copy_from_user(paddc, user + size, len - size) != 0) {
1356 ret = -EFAULT;
1357 goto free;
1358 }
1359
1360 t = xt_find_table_lock(net, AF_INET6, name);
1361 if (IS_ERR_OR_NULL(t)) {
1362 ret = t ? PTR_ERR(t) : -ENOENT;
1363 goto free;
1364 }
1365
1366
1367 local_bh_disable();
1368 private = t->private;
1369 if (private->number != num_counters) {
1370 ret = -EINVAL;
1371 goto unlock_up_free;
1372 }
1373
1374 i = 0;
1375 /* Choose the copy that is on our node */
1376 curcpu = smp_processor_id();
1377 addend = xt_write_recseq_begin();
1378 loc_cpu_entry = private->entries[curcpu];
1379 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1380 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1381 ++i;
1382 }
1383 xt_write_recseq_end(addend);
1384
1385 unlock_up_free:
1386 local_bh_enable();
1387 xt_table_unlock(t);
1388 module_put(t->me);
1389 free:
1390 vfree(paddc);
1391
1392 return ret;
1393 }
1394
1395 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace, received when a
 * compat (32-on-64) process issues IP6T_SO_SET_REPLACE.  This mirrors
 * the userspace ABI exactly and must not be changed.
 */
struct compat_ip6t_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;	/* number of rules that follow */
	u32			size;		/* total size of the entries blob */
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];
};
1407
/* Convert one kernel ip6t_entry into the compat (32-bit) user layout at
 * *dstptr, including its matches and target, advancing *dstptr and
 * shrinking *size by the conversion delta.  @i indexes this rule's
 * slot in @counters.  Returns 0 or a negative errno.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* Copy the header, then overwrite the counters slot with the
	 * freshly collected values for rule @i.
	 */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	/* Matches and target may shrink when converted; (origsize - *size)
	 * tracks the accumulated shrinkage for offset fixups below.
	 */
	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	/* Patch the offsets so they are valid in the compat layout. */
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1446
1447 static int
1448 compat_find_calc_match(struct xt_entry_match *m,
1449 const char *name,
1450 const struct ip6t_ip6 *ipv6,
1451 unsigned int hookmask,
1452 int *size)
1453 {
1454 struct xt_match *match;
1455
1456 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1457 m->u.user.revision);
1458 if (IS_ERR(match)) {
1459 duprintf("compat_check_calc_match: `%s' not found\n",
1460 m->u.user.name);
1461 return PTR_ERR(match);
1462 }
1463 m->u.kernel.match = match;
1464 *size += xt_compat_match_offset(match);
1465 return 0;
1466 }
1467
1468 static void compat_release_entry(struct compat_ip6t_entry *e)
1469 {
1470 struct xt_entry_target *t;
1471 struct xt_entry_match *ematch;
1472
1473 /* Cleanup all matches */
1474 xt_ematch_foreach(ematch, e)
1475 module_put(ematch->u.kernel.match->me);
1476 t = compat_ip6t_get_target(e);
1477 module_put(t->u.kernel.target->me);
1478 }
1479
/* First-pass validation of one compat entry: alignment and bounds, a
 * sane next_offset, resolvable matches and target.  Records this
 * entry's native-vs-compat size delta via xt_compat_add_offset() and
 * notes hook entry/underflow positions in @newinfo.  Runs under the
 * compat lock; on error every module reference taken here is dropped.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	/* An entry must at least hold its own header plus a target header. */
	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e);
	if (ret)
		return ret;

	/* Running native-minus-compat size difference for this entry. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;		/* matches resolved so far, for error unwind */
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ipv6, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

 out:
	module_put(t->u.kernel.target->me);
 release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1567
/* Second-pass conversion of one validated compat entry into native
 * layout at *dstptr, expanding matches and target and adjusting the
 * stored offsets plus the table's hook/underflow positions.  Runs after
 * check_compat_entry_size_and_hooks(), so extension lookups cannot
 * fail here.
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is negative growth: offsets move outward. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Shift hook entry/underflow offsets that lie beyond this entry. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1606
/* Run ->checkentry validation for a translated compat rule: every match
 * first, then the target.  On failure, destroy only the matches that
 * were already checked.
 */
static int compat_check_entry(struct ip6t_entry *e, struct net *net,
			      const char *name)
{
	unsigned int j;
	int ret = 0;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	j = 0;		/* matches successfully checked so far */
	mtpar.net = net;
	mtpar.table = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
1641
/* Convert a complete compat (32-bit) ruleset into native layout.  Two
 * passes under the compat lock: validate every entry and accumulate the
 * per-entry size deltas, then copy/expand into a freshly allocated
 * xt_table_info.  Afterwards chains are verified and entries ->check'ed
 * as in the native path.  On success *pinfo/*pentry0 are replaced and
 * the old info freed; on failure everything allocated or referenced
 * here is released.
 */
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;		/* entries validated so far, for error unwind */
	xt_compat_lock(AF_INET6);
	xt_compat_init_offsets(AF_INET6, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	/* Second pass: expand the entries into a native-sized table. */
	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	/* Verify chain structure / detect rule loops. */
	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;		/* entries whose ->check ran (need full cleanup) */
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ip6t_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

 free_newinfo:
	xt_free_table_info(newinfo);
 out:
	/* Release the j entries that were validated but never checked. */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
 out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1799
1800 static int
1801 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1802 {
1803 int ret;
1804 struct compat_ip6t_replace tmp;
1805 struct xt_table_info *newinfo;
1806 void *loc_cpu_entry;
1807 struct ip6t_entry *iter;
1808
1809 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1810 return -EFAULT;
1811
1812 /* overflow check */
1813 if (tmp.size >= INT_MAX / num_possible_cpus())
1814 return -ENOMEM;
1815 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1816 return -ENOMEM;
1817 tmp.name[sizeof(tmp.name)-1] = 0;
1818
1819 newinfo = xt_alloc_table_info(tmp.size);
1820 if (!newinfo)
1821 return -ENOMEM;
1822
1823 /* choose the copy that is on our node/cpu */
1824 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1825 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1826 tmp.size) != 0) {
1827 ret = -EFAULT;
1828 goto free_newinfo;
1829 }
1830
1831 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1832 &newinfo, &loc_cpu_entry, tmp.size,
1833 tmp.num_entries, tmp.hook_entry,
1834 tmp.underflow);
1835 if (ret != 0)
1836 goto free_newinfo;
1837
1838 duprintf("compat_do_replace: Translated table\n");
1839
1840 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1841 tmp.num_counters, compat_ptr(tmp.counters));
1842 if (ret)
1843 goto free_newinfo_untrans;
1844 return 0;
1845
1846 free_newinfo_untrans:
1847 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1848 cleanup_entry(iter, net);
1849 free_newinfo:
1850 xt_free_table_info(newinfo);
1851 return ret;
1852 }
1853
1854 static int
1855 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1856 unsigned int len)
1857 {
1858 int ret;
1859
1860 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1861 return -EPERM;
1862
1863 switch (cmd) {
1864 case IP6T_SO_SET_REPLACE:
1865 ret = compat_do_replace(sock_net(sk), user, len);
1866 break;
1867
1868 case IP6T_SO_SET_ADD_COUNTERS:
1869 ret = do_add_counters(sock_net(sk), user, len, 1);
1870 break;
1871
1872 default:
1873 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1874 ret = -EINVAL;
1875 }
1876
1877 return ret;
1878 }
1879
/* 32-bit userspace layout of the IP6T_SO_GET_ENTRIES request/reply
 * header; mirrors the userspace ABI and must not be changed.
 */
struct compat_ip6t_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;	/* expected size of the entries that follow */
	struct compat_ip6t_entry entrytable[0];
};
1885
/* Dump every rule of @table to userspace in the compat layout,
 * attaching freshly collected per-rule counters.
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;	/* rule index into the counters array */
	struct ip6t_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
1920
/* IP6T_SO_GET_ENTRIES (compat): validate the request length against the
 * compat-converted table size and dump the entries in compat layout.
 */
static int
compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	/* The compat offset table must stay stable while dumping. */
	xt_compat_lock(AF_INET6);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			/* Size raced with a table replace; user must retry. */
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET6);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET6);
	return ret;
}
1967
1968 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1969
1970 static int
1971 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1972 {
1973 int ret;
1974
1975 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1976 return -EPERM;
1977
1978 switch (cmd) {
1979 case IP6T_SO_GET_INFO:
1980 ret = get_info(sock_net(sk), user, len, 1);
1981 break;
1982 case IP6T_SO_GET_ENTRIES:
1983 ret = compat_get_entries(sock_net(sk), user, len);
1984 break;
1985 default:
1986 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1987 }
1988 return ret;
1989 }
1990 #endif
1991
1992 static int
1993 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1994 {
1995 int ret;
1996
1997 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1998 return -EPERM;
1999
2000 switch (cmd) {
2001 case IP6T_SO_SET_REPLACE:
2002 ret = do_replace(sock_net(sk), user, len);
2003 break;
2004
2005 case IP6T_SO_SET_ADD_COUNTERS:
2006 ret = do_add_counters(sock_net(sk), user, len, 0);
2007 break;
2008
2009 default:
2010 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2011 ret = -EINVAL;
2012 }
2013
2014 return ret;
2015 }
2016
/* getsockopt() dispatcher for the native ABI.  Requires CAP_NET_ADMIN
 * in the socket's user namespace.
 */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* Extension name comes from userspace: terminate it. */
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* Query xtables for the revision, autoloading the
		 * "ip6t_<name>" extension module if it is not present.
		 */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
2068
/* Create and register an ip6tables table for @net from the initial
 * ruleset in @repl.  Returns the registered table or an ERR_PTR().
 */
struct xt_table *ip6t_register_table(struct net *net,
				     const struct xt_table *table,
				     const struct ip6t_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	/* Validate and wire up the initial ruleset. */
	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

 out_free:
	xt_free_table_info(newinfo);
 out:
	return ERR_PTR(ret);
}
2105
/* Tear down a table registered with ip6t_register_table(): run entry
 * destructors, drop the module reference held for user-added rules and
 * free the table memory.
 */
void ip6t_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ip6t_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	/* A table carrying user rules pinned its owner; release that ref. */
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
2123
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool in_range = type == test_type &&
			code >= min_code && code <= max_code;

	return in_range != invert;
}
2133
/* Match function of the builtin "icmp6" match: compares the packet's
 * ICMPv6 type and code against the rule's configured range, honouring
 * the invert flag.  Non-first fragments never match.
 */
static bool
icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2161
2162 /* Called when user tries to insert an entry of this type. */
2163 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2164 {
2165 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2166
2167 /* Must specify no unknown invflags */
2168 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2169 }
2170
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		/* Verdict target: its data is just the verdict int. */
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		/* Marks user-defined chain heads; hitting it is an error. */
		.name             = XT_ERROR_TARGET,
		.target           = ip6t_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV6,
	},
};
2190
/* Socket-option interface through which ip6tables userspace talks to
 * this module (get/set on PF_INET6 sockets), with compat variants for
 * 32-bit userspace on 64-bit kernels.
 */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2207
/* The built-in "icmp6" match, always available with ip6_tables. */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp6",
		.match      = icmp6_match,
		.matchsize  = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto      = IPPROTO_ICMPV6,
		.family     = NFPROTO_IPV6,
	},
};
2218
/* Per-netns init: set up the IPv6 xtables state for @net. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
2223
/* Per-netns teardown: release the IPv6 xtables state of @net. */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
2228
/* Hooks ip6_tables into network-namespace creation/destruction. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2233
/* Module init: register per-netns state, the builtin targets and
 * matches, then the sockopt interface.  Unwinds in reverse order on
 * failure (the gaps in the error-label numbering are historical).
 */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

 err5:
	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
 err4:
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
 err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
 err1:
	return ret;
}
2267
/* Module exit: tear everything down in the reverse order of init. */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2276
/* Entry points used by the per-table modules (ip6table_filter etc.)
 * and by the packet-processing path.
 */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);