netfilter: x_tables: make sure e->next_offset covers remaining blob size
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / ipv6 / netfilter / ip6_tables.c
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/capability.h>
14 #include <linux/in.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/poison.h>
21 #include <linux/icmpv6.h>
22 #include <net/ipv6.h>
23 #include <net/compat.h>
24 #include <asm/uaccess.h>
25 #include <linux/mutex.h>
26 #include <linux/proc_fs.h>
27 #include <linux/err.h>
28 #include <linux/cpumask.h>
29
30 #include <linux/netfilter_ipv6/ip6_tables.h>
31 #include <linux/netfilter/x_tables.h>
32 #include <net/netfilter/nf_log.h>
33 #include "../../netfilter/xt_repldata.h"
34
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv6 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

/* dprintf(): packet-path debug output; compiles to nothing unless
 * DEBUG_IP_FIREWALL is defined above. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

/* duprintf(): ruleset-parsing (userspace interface) debug output;
 * compiles to nothing unless DEBUG_IP_FIREWALL_USER is defined. */
#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* Sanity assertion backed by WARN_ON; no-op without CONFIG_NETFILTER_DEBUG. */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x) WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
66
/* Allocate and populate an initial (policy-only) ruleset blob for @info
 * via the generic xt_repldata template helper.  Caller owns the returned
 * memory; returns NULL on allocation failure (per the helper's contract
 * — TODO confirm against xt_alloc_initial_table). */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
72
73 /*
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
79
80 Hence the start of any table is given by get_table() below. */
81
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
/*
 * Match @skb against the IPv6-specific part of a rule (@ip6info):
 * source/dest address (masked), in/out interface names, and the upper
 * layer protocol.  On a protocol match, *protoff is set to the offset
 * of the transport header and *fragoff to the fragment offset.  Sets
 * *hotdrop to request an immediate drop on malformed headers.
 */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XOR the raw comparison with the rule's inversion flag, so one
 * expression handles both "match" and "! match". */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
		if (protohdr < 0) {
			/* Header chain walk failed; only hotdrop when this
			 * is not a non-first fragment (frag offset 0). */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
			!(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
164
165 /* should be ip6 safe */
166 static bool
167 ip6_checkentry(const struct ip6t_ip6 *ipv6)
168 {
169 if (ipv6->flags & ~IP6T_F_MASK) {
170 duprintf("Unknown flag bits set: %08X\n",
171 ipv6->flags & ~IP6T_F_MASK);
172 return false;
173 }
174 if (ipv6->invflags & ~IP6T_INV_MASK) {
175 duprintf("Unknown invflag bits set: %08X\n",
176 ipv6->invflags & ~IP6T_INV_MASK);
177 return false;
178 }
179 return true;
180 }
181
/* Target handler for built-in ERROR rules.  Normally unreachable at
 * packet time; log the target data (printed as a string) ratelimited
 * and drop the packet. */
static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
189
/* Return the rule located @offset bytes into the ruleset blob @base.
 * Callers are responsible for offset validation. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
195
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
/* True when the IPv6 match part of a rule is entirely zero, i.e. the
 * rule matches every packet (no address/interface/proto constraints). */
static inline bool unconditional(const struct ip6t_ip6 *ipv6)
{
	static const struct ip6t_ip6 uncond;

	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
}
204
/* const-qualified wrapper around ip6t_get_target(): locate the target
 * record at e->target_offset without dropping constness for callers. */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
210
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
/* This cries for unification! */
/* Human-readable hook names, indexed by NF_INET_* hook number. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

/* Index into comments[] describing what kind of rule fired. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

/* Fixed log parameters used for all TRACE output (level 4, all flags). */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
242
/* Mildly perf critical (only if packet tracing is on) */
/*
 * Walk helper for trace_packet(): called per rule @s while scanning a
 * chain, looking for the rule @e that fired.  Tracks the current chain
 * name (ERROR targets head user chains) and the rule number within it.
 * Returns 1 when @e is reached (stopping the walk), 0 to continue.
 */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ip6t_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
274
/* Emit a "TRACE: table:chain:comment:rulenum" log line for a packet
 * that matched rule @e while the TRACE target is active on it.  Scans
 * the current hook's chain to recover chain name and rule number. */
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;
	struct net *net = dev_net(in ? in : out);

	/* Use this CPU's copy of the ruleset blob. */
	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
306
/* Advance to the rule immediately following @entry in the blob, using
 * its (already validated) next_offset. */
static inline __pure struct ip6t_entry *
ip6t_next_entry(const struct ip6t_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
312
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Main packet-evaluation loop: walk the rules of @table for hook @hook
 * starting at the hook's entry point, evaluating matches and targets
 * until a verdict is reached.  Runs with BH disabled and inside an
 * xt_write_recseq section so counter readers can detect updates.
 */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.hotdrop = false;
	acpar.in      = in;
	acpar.out     = out;
	acpar.family  = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	cpu        = smp_processor_id();
	/* Per-CPU copy of the ruleset and per-CPU jump stack for
	 * following chain jumps without recursion. */
	table_base = private->entries[cpu];
	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
	stackptr   = per_cpu_ptr(private->stackptr, cpu);
	origptr    = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		acpar.thoff = 0;
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* IPv6 part matched; now run each match extension. */
		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					/* Absolute verdict (ACCEPT/DROP/...),
					 * encoded as -verdict - 1. */
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (*stackptr <= origptr)
					e = get_entry(table_base,
					    private->underflow[hook]);
				else
					e = ip6t_next_entry(jumpstack[--*stackptr]);
				continue;
			}
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				/* Real jump (not goto, not fallthrough):
				 * remember where to return to. */
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);

	*stackptr = origptr;

	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
446
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops. Puts hook bitmask in comefrom. */
/*
 * Depth-first walk of the ruleset from every valid hook entry point.
 * Instead of recursion, back-pointers are stashed in e->counters.pcnt
 * (restored to 0 on the way out) and visitation state in e->comefrom:
 * bit 'hook' marks "reachable from this hook", bit NF_INET_NUMHOOKS
 * marks "currently on the walk path" (so seeing it again means a loop).
 */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				/* Rule is already on the current walk
				 * path: the chain graph has a cycle. */
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 &&
			     unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
	next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
558
559 static void cleanup_match(struct xt_entry_match *m, struct net *net)
560 {
561 struct xt_mtdtor_param par;
562
563 par.net = net;
564 par.match = m->u.kernel.match;
565 par.matchinfo = m->data;
566 par.family = NFPROTO_IPV6;
567 if (par.match->destroy != NULL)
568 par.match->destroy(&par);
569 module_put(par.match->me);
570 }
571
/*
 * Structural sanity checks on a single rule, run before the match and
 * target extensions are resolved: the IPv6 header part must carry only
 * known flags, and target_offset/next_offset must leave room for at
 * least a target header and for the target's declared size.  These
 * bounds checks guard against crafted userspace blobs.
 */
static int
check_entry(const struct ip6t_entry *e)
{
	const struct xt_entry_target *t;

	if (!ip6_checkentry(&e->ipv6))
		return -EINVAL;

	/* Minimal target header must fit before next_offset. */
	if (e->target_offset + sizeof(struct xt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	/* The target's self-declared size must also fit. */
	t = ip6t_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
590
591 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
592 {
593 const struct ip6t_ip6 *ipv6 = par->entryinfo;
594 int ret;
595
596 par->match = m->u.kernel.match;
597 par->matchinfo = m->data;
598
599 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
600 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
601 if (ret < 0) {
602 duprintf("ip_tables: check failed for `%s'.\n",
603 par.match->name);
604 return ret;
605 }
606 return 0;
607 }
608
609 static int
610 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
611 {
612 struct xt_match *match;
613 int ret;
614
615 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
616 m->u.user.revision);
617 if (IS_ERR(match)) {
618 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
619 return PTR_ERR(match);
620 }
621 m->u.kernel.match = match;
622
623 ret = check_match(m, par);
624 if (ret)
625 goto err;
626
627 return 0;
628 err:
629 module_put(m->u.kernel.match->me);
630 return ret;
631 }
632
633 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
634 {
635 struct xt_entry_target *t = ip6t_get_target(e);
636 struct xt_tgchk_param par = {
637 .net = net,
638 .table = name,
639 .entryinfo = e,
640 .target = t->u.kernel.target,
641 .targinfo = t->data,
642 .hook_mask = e->comefrom,
643 .family = NFPROTO_IPV6,
644 };
645 int ret;
646
647 t = ip6t_get_target(e);
648 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
649 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
650 if (ret < 0) {
651 duprintf("ip_tables: check failed for `%s'.\n",
652 t->u.kernel.target->name);
653 return ret;
654 }
655 return 0;
656 }
657
/*
 * Resolve and validate all match extensions and the target of one rule.
 * @j counts successfully set-up matches so that, on any failure, exactly
 * those are torn down again (partial rollback); the target's module ref
 * is likewise dropped if its own check fails.
 */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	/* Undo only the matches that were successfully initialized. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
706
707 static bool check_underflow(const struct ip6t_entry *e)
708 {
709 const struct xt_entry_target *t;
710 unsigned int verdict;
711
712 if (!unconditional(&e->ipv6))
713 return false;
714 t = ip6t_get_target_c(e);
715 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
716 return false;
717 verdict = ((struct xt_standard_target *)t)->verdict;
718 verdict = -verdict - 1;
719 return verdict == NF_DROP || verdict == NF_ACCEPT;
720 }
721
/*
 * Per-rule pass during table translation: verify the rule is aligned,
 * lies fully inside the blob (including that next_offset does not run
 * past @limit), is structurally valid, and record hook entry/underflow
 * positions declared by userspace.  Also resets the counters and
 * comefrom scratch fields for the later chain walk.
 */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	/* The third test ensures e->next_offset stays within the blob,
	 * so walking to the "next" rule can never leave the buffer. */
	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* next_offset must at least cover the entry plus one target. */
	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	err = check_entry(e);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
774
775 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
776 {
777 struct xt_tgdtor_param par;
778 struct xt_entry_target *t;
779 struct xt_entry_match *ematch;
780
781 /* Cleanup all matches */
782 xt_ematch_foreach(ematch, e)
783 cleanup_match(ematch, net);
784 t = ip6t_get_target(e);
785
786 par.net = net;
787 par.target = t->u.kernel.target;
788 par.targinfo = t->data;
789 par.family = NFPROTO_IPV6;
790 if (par.target->destroy != NULL)
791 par.target->destroy(&par);
792 module_put(par.target->me);
793 }
794
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/*
 * Full validation pipeline for a replacement ruleset blob @entry0:
 *  1) per-rule size/offset/hook checks,
 *  2) all hooks for the table must have entry and underflow points,
 *  3) loop detection via mark_source_chains(),
 *  4) match/target extension lookup and validation (with rollback),
 *  5) replicate the validated blob to every other CPU's copy.
 */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		/* Each user chain head (ERROR target) deepens the
		 * maximum possible jump nesting. */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Roll back only the i rules that passed find_check_entry. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
883
/*
 * Sum the per-CPU packet/byte counters of every rule into @counters
 * (indexed by rule number).  Each read is done under the xt_recseq
 * seqcount retry loop so a concurrent writer on that CPU cannot be
 * observed mid-update.
 */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			/* Retry until a consistent snapshot is read. */
			do {
				start = read_seqcount_begin(s);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}
911
912 static struct xt_counters *alloc_counters(const struct xt_table *table)
913 {
914 unsigned int countersize;
915 struct xt_counters *counters;
916 const struct xt_table_info *private = table->private;
917
918 /* We need atomic snapshot of counters: rest doesn't change
919 (other than comefrom, which userspace doesn't care
920 about). */
921 countersize = sizeof(struct xt_counters) * private->number;
922 counters = vzalloc(countersize);
923
924 if (counters == NULL)
925 return ERR_PTR(-ENOMEM);
926
927 get_counters(private, counters);
928
929 return counters;
930 }
931
/*
 * Copy the active ruleset blob to userspace, then patch it up in place:
 * per-rule counters are replaced by a fresh snapshot, and the kernel-
 * internal match/target pointers are overwritten with the extensions'
 * user-visible names so the blob round-trips through iptables-save.
 */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite the copied (stale) counters with the snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Rewrite each match's kernel pointer union with its name. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Same for the target. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1005
1006 #ifdef CONFIG_COMPAT
1007 static void compat_standard_from_user(void *dst, const void *src)
1008 {
1009 int v = *(compat_int_t *)src;
1010
1011 if (v > 0)
1012 v += xt_compat_calc_jump(AF_INET6, v);
1013 memcpy(dst, &v, sizeof(v));
1014 }
1015
1016 static int compat_standard_to_user(void __user *dst, const void *src)
1017 {
1018 compat_int_t cv = *(int *)src;
1019
1020 if (cv > 0)
1021 cv -= xt_compat_calc_jump(AF_INET6, cv);
1022 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1023 }
1024
/*
 * For one rule, compute how much smaller its compat (32-bit) form is
 * (@off = native size - compat size, summed over entry header, matches
 * and target), register that delta with the compat offset table, and
 * shrink newinfo's running size and any hook/underflow offsets that lie
 * beyond this rule.
 */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Hooks located after this rule shift down by @off. */
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1055
/*
 * Build the compat view of a table's metadata: copy the native header,
 * then walk every rule adjusting sizes and hook offsets for the 32-bit
 * layout.  Caller must hold the compat lock (the offset table built
 * here is flushed by the caller — see get_info()).
 */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ip6t_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_compat_init_offsets(AF_INET6, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif
1079
/*
 * IP6T_SO_GET_INFO handler: copy a table's metadata (hook entry points,
 * underflows, entry count, blob size) to userspace.  The table name is
 * read from @user and forcibly NUL-terminated before lookup; for compat
 * callers the sizes/offsets are first translated to the 32-bit layout.
 */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Untrusted input: guarantee NUL termination. */
	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET6);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
			private = &tmp;
		}
#endif
		/* Zero first: avoids leaking kernel stack to userspace. */
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET6);
#endif
	return ret;
}
1140
/*
 * IP6T_SO_GET_ENTRIES handler (native ABI).
 *
 * Copies the table's rule blob back to userspace. The caller must pass
 * a buffer of exactly sizeof(struct ip6t_get_entries) + get.size bytes,
 * where get.size matches the kernel's current table size; a mismatch
 * yields -EAGAIN so userspace can re-query with IP6T_SO_GET_INFO.
 */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	/* Need at least the fixed header before we can read get.size. */
	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	/* Total length must be exactly header + claimed blob size. */
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			/* Table changed size since userspace asked. */
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1180
/*
 * Core of table replacement, shared by the native and compat paths.
 *
 * Swaps @newinfo into table @name via xt_replace_table(), harvests the
 * old table's counters into @counters_ptr (userspace), releases the old
 * table's rule resources and frees it. On success ownership of @newinfo
 * passes to the table; on failure the caller still owns @newinfo.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;
	struct ip6t_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules.
	 * try_then_request_module() above took one reference; the table
	 * itself holds a reference while it has more than its built-in
	 * entries. Between the two conditionals below, zero, one or two
	 * puts happen so the net count matches the new table's state.
	 */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1255
1256 static int
1257 do_replace(struct net *net, const void __user *user, unsigned int len)
1258 {
1259 int ret;
1260 struct ip6t_replace tmp;
1261 struct xt_table_info *newinfo;
1262 void *loc_cpu_entry;
1263 struct ip6t_entry *iter;
1264
1265 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1266 return -EFAULT;
1267
1268 /* overflow check */
1269 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1270 return -ENOMEM;
1271 tmp.name[sizeof(tmp.name)-1] = 0;
1272
1273 newinfo = xt_alloc_table_info(tmp.size);
1274 if (!newinfo)
1275 return -ENOMEM;
1276
1277 /* choose the copy that is on our node/cpu */
1278 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1279 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1280 tmp.size) != 0) {
1281 ret = -EFAULT;
1282 goto free_newinfo;
1283 }
1284
1285 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1286 if (ret != 0)
1287 goto free_newinfo;
1288
1289 duprintf("ip_tables: Translated table\n");
1290
1291 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1292 tmp.num_counters, tmp.counters);
1293 if (ret)
1294 goto free_newinfo_untrans;
1295 return 0;
1296
1297 free_newinfo_untrans:
1298 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1299 cleanup_entry(iter, net);
1300 free_newinfo:
1301 xt_free_table_info(newinfo);
1302 return ret;
1303 }
1304
/*
 * IP6T_SO_SET_ADD_COUNTERS handler (shared by native and compat paths;
 * @compat selects which userspace header layout to expect).
 *
 * Reads an array of xt_counters from userspace and adds them onto the
 * live table's per-rule counters. The number of supplied counters must
 * match the table's rule count exactly.
 */
static int
do_add_counters(struct net *net, const void __user *user, unsigned int len,
		int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	const void *loc_cpu_entry;
	struct ip6t_entry *iter;
	unsigned int addend;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	/* Pick the header layout matching the caller's ABI. */
	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	/* Total length must be exactly header + counter array. */
	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len - size);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET6, name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}


	local_bh_disable();
	private = t->private;
	/* One userspace counter per rule, no more, no less. */
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	/* Bracket the updates with the xt recseq so readers see a
	 * consistent snapshot of each counter pair.
	 */
	addend = xt_write_recseq_begin();
	loc_cpu_entry = private->entries[curcpu];
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);

 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
1395
1396 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace: fixed-width fields
 * and a compat pointer, followed by the compat-formatted rule blob.
 */
struct compat_ip6t_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;		/* size of entries[] blob */
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];
};
1408
/*
 * Copy one kernel entry out to a 32-bit userspace buffer, shrinking it
 * to the compat layout. *dstptr/*size track the destination cursor and
 * remaining compat-space budget; target_offset/next_offset are rebased
 * by however much the entry shrank so far (origsize - *size).
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* Header copy, then overwrite the counters with the collected
	 * per-rule totals for slot @i.
	 */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
	    sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	/* Each match/target converter advances *dstptr and reduces *size
	 * by its own kernel-vs-compat size delta.
	 */
	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	/* Patch the rebased offsets into the already-copied header. */
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1447
1448 static int
1449 compat_find_calc_match(struct xt_entry_match *m,
1450 const char *name,
1451 const struct ip6t_ip6 *ipv6,
1452 unsigned int hookmask,
1453 int *size)
1454 {
1455 struct xt_match *match;
1456
1457 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1458 m->u.user.revision);
1459 if (IS_ERR(match)) {
1460 duprintf("compat_check_calc_match: `%s' not found\n",
1461 m->u.user.name);
1462 return PTR_ERR(match);
1463 }
1464 m->u.kernel.match = match;
1465 *size += xt_compat_match_offset(match);
1466 return 0;
1467 }
1468
1469 static void compat_release_entry(struct compat_ip6t_entry *e)
1470 {
1471 struct xt_entry_target *t;
1472 struct xt_entry_match *ematch;
1473
1474 /* Cleanup all matches */
1475 xt_ematch_foreach(ematch, e)
1476 module_put(ematch->u.kernel.match->me);
1477 t = compat_ip6t_get_target(e);
1478 module_put(t->u.kernel.target->me);
1479 }
1480
/*
 * First-pass validation of one compat entry inside the userspace blob
 * [base, limit): alignment, bounds, minimum size, match/target lookup,
 * and hook/underflow bookkeeping. On success the entry's matches and
 * target hold module references (released via compat_release_entry()
 * or by the caller's unwind).
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	/* The entry must be aligned and lie fully within the blob, and
	 * the user-supplied next_offset must not point past the end of
	 * the remaining blob either.
	 */
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	/* next_offset must leave room for at least the header + target. */
	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e);
	if (ret)
		return ret;

	/* @off accumulates how much this entry grows when converted to
	 * the native layout (header delta + each match/target delta).
	 */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ipv6, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;	/* count matches for the error unwind */
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	/* Record compat->native offset delta for translation pass 2. */
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	/* Drop references for the first j matches only. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1569
/*
 * Second translation pass: expand one validated compat entry at @e into
 * native layout at *dstptr. *size grows by the per-entry delta recorded
 * in pass 1; hook/underflow offsets in @newinfo that lie after this
 * entry are shifted accordingly.
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	/* Each converter advances *dstptr and bumps *size by its delta. */
	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* Rebase offsets by the growth accumulated so far. Note *size
	 * grows here, so (origsize - *size) is negative and the offsets
	 * get larger.
	 */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1608
/*
 * Run ->checkentry on every match and the target of a translated
 * (native-layout) entry. On failure, matches already checked are
 * cleaned up here; the caller handles the rest of the unwind.
 */
static int compat_check_entry(struct ip6t_entry *e, struct net *net,
			      const char *name)
{
	unsigned int j;
	int ret = 0;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;	/* count for partial cleanup below */
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	/* Destroy only the j matches whose check succeeded. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
1643
/*
 * Translate a compat (32-bit layout) rule blob into a freshly allocated
 * native table.
 *
 * Pass 1 (under xt_compat_lock): validate every compat entry, resolve
 * matches/targets and record per-entry size deltas.
 * Pass 2: copy each entry into the native-layout table, then run the
 * usual mark_source_chains() loop check and per-entry ->checkentry.
 *
 * On success *pinfo/*pentry0 are replaced with the new table (the old
 * info is freed) and 0 is returned. On failure all module references
 * and allocations taken here are released.
 */
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;	/* number of entries successfully checked (for unwind) */
	xt_compat_lock(AF_INET6);
	xt_compat_init_offsets(AF_INET6, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	/* @size now includes every entry's native-layout growth. */
	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;	/* entries whose ->checkentry succeeded */
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ip6t_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	/* Release refs for the j compat entries that passed pass 1. */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1801
1802 static int
1803 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1804 {
1805 int ret;
1806 struct compat_ip6t_replace tmp;
1807 struct xt_table_info *newinfo;
1808 void *loc_cpu_entry;
1809 struct ip6t_entry *iter;
1810
1811 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1812 return -EFAULT;
1813
1814 /* overflow check */
1815 if (tmp.size >= INT_MAX / num_possible_cpus())
1816 return -ENOMEM;
1817 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1818 return -ENOMEM;
1819 tmp.name[sizeof(tmp.name)-1] = 0;
1820
1821 newinfo = xt_alloc_table_info(tmp.size);
1822 if (!newinfo)
1823 return -ENOMEM;
1824
1825 /* choose the copy that is on our node/cpu */
1826 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1827 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1828 tmp.size) != 0) {
1829 ret = -EFAULT;
1830 goto free_newinfo;
1831 }
1832
1833 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1834 &newinfo, &loc_cpu_entry, tmp.size,
1835 tmp.num_entries, tmp.hook_entry,
1836 tmp.underflow);
1837 if (ret != 0)
1838 goto free_newinfo;
1839
1840 duprintf("compat_do_replace: Translated table\n");
1841
1842 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1843 tmp.num_counters, compat_ptr(tmp.counters));
1844 if (ret)
1845 goto free_newinfo_untrans;
1846 return 0;
1847
1848 free_newinfo_untrans:
1849 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1850 cleanup_entry(iter, net);
1851 free_newinfo:
1852 xt_free_table_info(newinfo);
1853 return ret;
1854 }
1855
1856 static int
1857 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1858 unsigned int len)
1859 {
1860 int ret;
1861
1862 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1863 return -EPERM;
1864
1865 switch (cmd) {
1866 case IP6T_SO_SET_REPLACE:
1867 ret = compat_do_replace(sock_net(sk), user, len);
1868 break;
1869
1870 case IP6T_SO_SET_ADD_COUNTERS:
1871 ret = do_add_counters(sock_net(sk), user, len, 1);
1872 break;
1873
1874 default:
1875 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1876 ret = -EINVAL;
1877 }
1878
1879 return ret;
1880 }
1881
/* 32-bit userspace layout for IP6T_SO_GET_ENTRIES: table name, blob
 * size, then the compat-formatted rule blob.
 */
struct compat_ip6t_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ip6t_entry entrytable[0];
};
1887
/*
 * Dump the whole table to a 32-bit userspace buffer: snapshot the
 * counters, then convert each entry via compat_copy_entry_to_user().
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ip6t_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		/* i indexes into the counters snapshot, one per rule. */
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
1922
/*
 * IP6T_SO_GET_ENTRIES handler for 32-bit callers. Like get_entries()
 * but compares against the table's compat size (compat_table_info())
 * and converts entries on the way out. Runs under xt_compat_lock so
 * the offset map built by compat_table_info() stays valid.
 */
static int
compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	/* Total length must be exactly header + claimed blob size. */
	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET6);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			/* Size mismatch: ask userspace to re-query. */
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET6);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET6);
	return ret;
}
1969
1970 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1971
1972 static int
1973 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1974 {
1975 int ret;
1976
1977 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1978 return -EPERM;
1979
1980 switch (cmd) {
1981 case IP6T_SO_GET_INFO:
1982 ret = get_info(sock_net(sk), user, len, 1);
1983 break;
1984 case IP6T_SO_GET_ENTRIES:
1985 ret = compat_get_entries(sock_net(sk), user, len);
1986 break;
1987 default:
1988 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1989 }
1990 return ret;
1991 }
1992 #endif
1993
1994 static int
1995 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1996 {
1997 int ret;
1998
1999 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2000 return -EPERM;
2001
2002 switch (cmd) {
2003 case IP6T_SO_SET_REPLACE:
2004 ret = do_replace(sock_net(sk), user, len);
2005 break;
2006
2007 case IP6T_SO_SET_ADD_COUNTERS:
2008 ret = do_add_counters(sock_net(sk), user, len, 0);
2009 break;
2010
2011 default:
2012 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2013 ret = -EINVAL;
2014 }
2015
2016 return ret;
2017 }
2018
/* getsockopt() dispatcher (native ABI); requires CAP_NET_ADMIN in the
 * socket's user namespace.
 */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		/* Probe whether a match/target of a given revision exists,
		 * auto-loading its module if needed.
		 */
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* xt_find_revision() stores its result in ret. */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
2070
/*
 * Register a new ip6tables table for @net from the template @repl.
 * Returns the registered xt_table or an ERR_PTR on failure; on success
 * ownership of the translated rule blob passes to the xt core.
 */
struct xt_table *ip6t_register_table(struct net *net,
				     const struct xt_table *table,
				     const struct ip6t_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
2107
/*
 * Unregister @table from @net, tear down every rule (releasing the
 * match/target module references) and free the table info. Drops the
 * table-owner module reference held for non-builtin entries.
 */
void ip6t_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ip6t_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
2125
2126 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* Returns true when (type, code) matches the configured type and code
 * range, XORed with the invert flag; returns the opposite when
 * @invert is set.
 */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool hit = type == test_type &&
		   code >= min_code && code <= max_code;

	return hit ^ invert;
}
2135
/*
 * Match callback for the built-in "icmp6" match: pulls the ICMPv6
 * header from the packet and compares type/code against the rule.
 * Fragments never match; a truncated header hot-drops the packet.
 */
static bool
icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2163
2164 /* Called when user tries to insert an entry of this type. */
2165 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2166 {
2167 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2168
2169 /* Must specify no unknown invflags */
2170 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2171 }
2172
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		/* Standard verdicts (ACCEPT/DROP/jump); no target func,
		 * handled inline by ip6t_do_table().
		 */
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		/* Chain-end marker; hitting it is a ruleset bug. */
		.name             = XT_ERROR_TARGET,
		.target           = ip6t_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV6,
	},
};
2192
/* get/setsockopt registration for the ip6tables control interface,
 * including the 32-bit compat entry points.
 */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2209
/* Built-in "icmp6" match, registered alongside the targets above. */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp6",
		.match      = icmp6_match,
		.matchsize  = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto      = IPPROTO_ICMPV6,
		.family     = NFPROTO_IPV6,
	},
};
2220
/* Per-netns init: set up the IPv6 xt proto state (proc entries etc.). */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
2225
/* Per-netns teardown: release the IPv6 xt proto state. */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
2230
/* Hooks the per-netns init/exit above into the netns lifecycle. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2235
/*
 * Module init: register the pernet ops, built-in targets/matches and
 * the sockopt interface, unwinding in reverse order on any failure.
 */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

	/* Unwind ladder; label numbering is historical, not sequential. */
err5:
	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
err4:
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}
2269
/* Module exit: unregister everything in the reverse of init order. */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2278
/* Public API for table modules (ip6table_filter et al.). */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);