/*
 * Source provenance (web-scrape residue, kept as a comment so the file
 * remains valid C): merge tag 'v3.10.102' into update,
 * GitHub/mt8127/android_kernel_alcatel_ttab.git
 * — net/ipv6/netfilter/ip6_tables.c
 */
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/capability.h>
14 #include <linux/in.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/poison.h>
21 #include <linux/icmpv6.h>
22 #include <net/ipv6.h>
23 #include <net/compat.h>
24 #include <asm/uaccess.h>
25 #include <linux/mutex.h>
26 #include <linux/proc_fs.h>
27 #include <linux/err.h>
28 #include <linux/cpumask.h>
29
30 #include <linux/netfilter_ipv6/ip6_tables.h>
31 #include <linux/netfilter/x_tables.h>
32 #include <net/netfilter/nf_log.h>
33 #include "../../netfilter/xt_repldata.h"
34
35 MODULE_LICENSE("GPL");
36 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
37 MODULE_DESCRIPTION("IPv6 packet filter");
38
39 /*#define DEBUG_IP_FIREWALL*/
40 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
41 /*#define DEBUG_IP_FIREWALL_USER*/
42
43 #ifdef DEBUG_IP_FIREWALL
44 #define dprintf(format, args...) pr_info(format , ## args)
45 #else
46 #define dprintf(format, args...)
47 #endif
48
49 #ifdef DEBUG_IP_FIREWALL_USER
50 #define duprintf(format, args...) pr_info(format , ## args)
51 #else
52 #define duprintf(format, args...)
53 #endif
54
55 #ifdef CONFIG_NETFILTER_DEBUG
56 #define IP_NF_ASSERT(x) WARN_ON(!(x))
57 #else
58 #define IP_NF_ASSERT(x)
59 #endif
60
61 #if 0
62 /* All the better to debug you with... */
63 #define static
64 #define inline
65 #endif
66
/* Allocate and fill in the boot-time ("initial") replacement blob for an
 * ip6_tables table.  The ip6t/IP6T tokens are pasted by the
 * xt_alloc_initial_table() macro (see xt_repldata.h) into the per-family
 * struct and constant names.  Returns a kmalloc'd buffer owned by the
 * caller, or NULL on allocation failure. */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
72
73 /*
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
79
80 Hence the start of any table is given by get_table() below. */
81
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XOR the raw comparison result with the rule's inversion bit so one
 * test covers both the plain and the "!"-inverted form of a rule. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Masked source/destination address comparison, honouring the
	 * per-rule inversion flags. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Input interface comparison (mask permits wildcard suffixes). */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	/* Output interface comparison. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		/* Walk the extension-header chain; on success *protoff
		 * points at the transport header. */
		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
		if (protohdr < 0) {
			/* Header walk failed: hot-drop only when this is
			 * not a later fragment (a fragment may simply lack
			 * the upper-layer header). */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
				protohdr,
				ip6info->invflags & IP6T_INV_PROTO ? "!":"",
				ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
			!(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
164
165 /* should be ip6 safe */
166 static bool
167 ip6_checkentry(const struct ip6t_ip6 *ipv6)
168 {
169 if (ipv6->flags & ~IP6T_F_MASK) {
170 duprintf("Unknown flag bits set: %08X\n",
171 ipv6->flags & ~IP6T_F_MASK);
172 return false;
173 }
174 if (ipv6->invflags & ~IP6T_INV_MASK) {
175 duprintf("Unknown invflag bits set: %08X\n",
176 ipv6->invflags & ~IP6T_INV_MASK);
177 return false;
178 }
179 return true;
180 }
181
182 static unsigned int
183 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
184 {
185 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
186
187 return NF_DROP;
188 }
189
/* Translate a byte offset within a table blob into an entry pointer.
 * Uses GCC's void-pointer arithmetic extension; no bounds checking is
 * done here — callers must pass validated offsets. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
195
196 /* All zeroes == unconditional rule. */
197 /* Mildly perf critical (only if packet tracing is on) */
198 static inline bool unconditional(const struct ip6t_entry *e)
199 {
200 static const struct ip6t_ip6 uncond;
201
202 return e->target_offset == sizeof(struct ip6t_entry) &&
203 memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
204 }
205
206
/* const-correct wrapper around ip6t_get_target(): the cast drops const
 * only to satisfy the non-const helper's prototype; the entry itself is
 * never modified. */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
212
213 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
/* This cries for unification! */
/* Human-readable names for the netfilter hook points, indexed by hook
 * number; used to label TRACE log lines. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]	= "PREROUTING",
	[NF_INET_LOCAL_IN]	= "INPUT",
	[NF_INET_FORWARD]	= "FORWARD",
	[NF_INET_LOCAL_OUT]	= "OUTPUT",
	[NF_INET_POST_ROUTING]	= "POSTROUTING",
};

/* Which kind of rule produced a trace hit. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

/* Comment strings matching the enum above, emitted in TRACE output. */
static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

/* Fixed logging parameters for TRACE output: classic LOG format,
 * level 4, all log flags enabled. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
244
/* Mildly perf critical (only if packet tracing is on) */
/* Per-rule step of the trace walk: @s is the rule currently visited,
 * @e the rule that actually matched.  Tracks the current chain name
 * (set by ERROR targets that head user chains), the running rule
 * number, and the comment type.  Returns 1 when @e is reached (caller
 * stops walking), 0 to continue. */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (unconditional(s) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0) {
			/* Tail of chains: STANDARD target (return/policy) */
			/* Chain that equals the hook name is a base chain,
			 * so its tail is the policy; otherwise a return. */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
275
/* Emit one "TRACE: table:chain:comment:rulenum" log line for a packet
 * that hit rule @e, by re-walking this hook's chain from its entry
 * point to discover the chain name and rule position.  Runs only when
 * skb->nf_trace is set; caller holds the per-cpu table context. */
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;
	/* At least one of in/out is non-NULL on every hook. */
	struct net *net = dev_net(in ? in : out);

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	/* Walk from the hook's first rule until @e is found. */
	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
306 #endif
307
/* Advance to the rule physically following @entry in the table blob
 * (entries are variable-sized; next_offset is the full entry length). */
static inline __pure struct ip6t_entry *
ip6t_next_entry(const struct ip6t_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
313
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Core rule-traversal engine: runs @skb through @table at hook @hook.
 * Uses the per-cpu copy of the table, a per-cpu jump stack for chain
 * calls, and the xt_recseq write sequence so that counter updates can
 * be snapshotted consistently by get_counters().
 */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.hotdrop = false;
	acpar.in      = in;
	acpar.out     = out;
	acpar.family  = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	/* Enter the per-cpu write-sequence section; softirqs stay off
	 * for the whole traversal so the per-cpu state is stable. */
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	cpu        = smp_processor_id();
	table_base = private->entries[cpu];
	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
	stackptr   = per_cpu_ptr(private->stackptr, cpu);
	origptr    = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		acpar.thoff = 0;
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* All extended matches of the rule must also agree. */
		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? (NULL target function means standard) */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					/* Absolute verdict encoded as -v-1 */
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				/* RETURN: pop the jump stack, or fall back
				 * to this hook's underflow (policy) rule
				 * when the stack is empty. */
				if (*stackptr <= origptr)
					e = get_entry(table_base,
					    private->underflow[hook]);
				else
					e = ip6t_next_entry(jumpstack[--*stackptr]);
				continue;
			}
			/* A jump (not a goto, and not a trivial jump to the
			 * next rule) pushes the current rule for RETURN. */
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);

	*stackptr = origptr;

	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
447
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
/*
 * Depth-first walk over every chain reachable from each valid hook.
 * No recursion: the otherwise-unused counters.pcnt field temporarily
 * stores the back pointer for backtracking (reset to 0 on the way
 * out), and comefrom accumulates the hook bitmask plus a "currently on
 * the walk path" marker bit (bit NF_INET_NUMHOOKS) used for loop
 * detection.  Also validates standard-target verdicts.
 */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Marker bit still set => we are revisiting a rule
			 * that is on the current walk path: a loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				/* Reject verdicts outside the encodable
				 * range before trusting them. */
				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					/* Follow the back pointer saved in
					 * pcnt, clearing it as we go. */
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				/* Keep popping while we came from the rule
				 * immediately before (plain fallthrough). */
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
558
559 static void cleanup_match(struct xt_entry_match *m, struct net *net)
560 {
561 struct xt_mtdtor_param par;
562
563 par.net = net;
564 par.match = m->u.kernel.match;
565 par.matchinfo = m->data;
566 par.family = NFPROTO_IPV6;
567 if (par.match->destroy != NULL)
568 par.match->destroy(&par);
569 module_put(par.match->me);
570 }
571
572 static int
573 check_entry(const struct ip6t_entry *e)
574 {
575 const struct xt_entry_target *t;
576
577 if (!ip6_checkentry(&e->ipv6))
578 return -EINVAL;
579
580 if (e->target_offset + sizeof(struct xt_entry_target) >
581 e->next_offset)
582 return -EINVAL;
583
584 t = ip6t_get_target_c(e);
585 if (e->target_offset + t->u.target_size > e->next_offset)
586 return -EINVAL;
587
588 return 0;
589 }
590
591
592 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
593 {
594 const struct ip6t_ip6 *ipv6 = par->entryinfo;
595 int ret;
596
597 par->match = m->u.kernel.match;
598 par->matchinfo = m->data;
599
600 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
601 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
602 if (ret < 0) {
603 duprintf("ip_tables: check failed for `%s'.\n",
604 par.match->name);
605 return ret;
606 }
607 return 0;
608 }
609
610 static int
611 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
612 {
613 struct xt_match *match;
614 int ret;
615
616 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
617 m->u.user.revision);
618 if (IS_ERR(match)) {
619 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
620 return PTR_ERR(match);
621 }
622 m->u.kernel.match = match;
623
624 ret = check_match(m, par);
625 if (ret)
626 goto err;
627
628 return 0;
629 err:
630 module_put(m->u.kernel.match->me);
631 return ret;
632 }
633
634 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
635 {
636 struct xt_entry_target *t = ip6t_get_target(e);
637 struct xt_tgchk_param par = {
638 .net = net,
639 .table = name,
640 .entryinfo = e,
641 .target = t->u.kernel.target,
642 .targinfo = t->data,
643 .hook_mask = e->comefrom,
644 .family = NFPROTO_IPV6,
645 };
646 int ret;
647
648 t = ip6t_get_target(e);
649 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
650 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
651 if (ret < 0) {
652 duprintf("ip_tables: check failed for `%s'.\n",
653 t->u.kernel.target->name);
654 return ret;
655 }
656 return 0;
657 }
658
/* Fully validate one rule: resolve and check every match, then resolve
 * and check the target.  On any failure, everything acquired so far
 * (match module refs, target module ref) is released before returning.
 * Returns 0 or a negative errno. */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	/* j counts matches successfully set up, so the unwind path knows
	 * exactly how many to tear down. */
	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	/* Target resolved but its checks failed: drop its module ref. */
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	/* Tear down only the first j matches (the ones set up above). */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
707
708 static bool check_underflow(const struct ip6t_entry *e)
709 {
710 const struct xt_entry_target *t;
711 unsigned int verdict;
712
713 if (!unconditional(e))
714 return false;
715 t = ip6t_get_target_c(e);
716 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
717 return false;
718 verdict = ((struct xt_standard_target *)t)->verdict;
719 verdict = -verdict - 1;
720 return verdict == NF_DROP || verdict == NF_ACCEPT;
721 }
722
/* Structural validation of one rule while walking a user-supplied
 * blob: alignment, bounds against [base, limit), minimum size, target
 * fit, plus recording of hook entry points and (validated) underflow
 * positions into newinfo.  Clears counters/comefrom for later passes.
 * Returns 0 or a negative errno. */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	/* Reject misaligned entries and entries whose header or declared
	 * length would run past the end of the blob. */
	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* Must hold at least the fixed header plus a target header. */
	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	err = check_entry(e);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
775
776 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
777 {
778 struct xt_tgdtor_param par;
779 struct xt_entry_target *t;
780 struct xt_entry_match *ematch;
781
782 /* Cleanup all matches */
783 xt_ematch_foreach(ematch, e)
784 cleanup_match(ematch, net);
785 t = ip6t_get_target(e);
786
787 par.net = net;
788 par.target = t->u.kernel.target;
789 par.targinfo = t->data;
790 par.family = NFPROTO_IPV6;
791 if (par.target->destroy != NULL)
792 par.target->destroy(&par);
793 module_put(par.target->me);
794 }
795
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/*
 * Three passes over the blob: (1) structural checks plus hook/underflow
 * bookkeeping, (2) loop detection via mark_source_chains(), (3) full
 * match/target resolution with rollback on failure.  Finally the
 * validated blob is copied to every other CPU's per-cpu slot.
 * Returns 0 or a negative errno (-ELOOP for chain loops).
 */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		/* Each user chain head (ERROR target) can add one level
		 * of jump nesting: size the jump stack accordingly. */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	/* Entry count must match what userspace claimed. */
	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Roll back only the i entries successfully set up. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
884
/* Sum the per-cpu packet/byte counters of every rule in @t into the
 * caller-supplied array (one xt_counters per rule, indexed in table
 * order).  The xt_recseq seqcount retry loop guarantees a consistent
 * 64-bit snapshot even against a concurrent writer on that CPU. */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			/* Retry until the pair is read without a writer
			 * having been active in between. */
			do {
				start = read_seqcount_begin(s);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}
912
913 static struct xt_counters *alloc_counters(const struct xt_table *table)
914 {
915 unsigned int countersize;
916 struct xt_counters *counters;
917 const struct xt_table_info *private = table->private;
918
919 /* We need atomic snapshot of counters: rest doesn't change
920 (other than comefrom, which userspace doesn't care
921 about). */
922 countersize = sizeof(struct xt_counters) * private->number;
923 counters = vzalloc(countersize);
924
925 if (counters == NULL)
926 return ERR_PTR(-ENOMEM);
927
928 get_counters(private, counters);
929
930 return counters;
931 }
932
/* Copy the whole table blob to userspace, then patch it up in place:
 * the live counters are overwritten with a consistent snapshot, and
 * each match/target's kernel-side name is written back over the
 * user-visible name field (the kernel may have resolved a different
 * revision than the user supplied).  Returns 0 or a negative errno. */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite the copied (live) counters with the snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Walk the matches between the fixed header and the
		 * target, fixing each match's user-visible name. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
					 != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Same fix-up for the target's name. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1006
1007 #ifdef CONFIG_COMPAT
1008 static void compat_standard_from_user(void *dst, const void *src)
1009 {
1010 int v = *(compat_int_t *)src;
1011
1012 if (v > 0)
1013 v += xt_compat_calc_jump(AF_INET6, v);
1014 memcpy(dst, &v, sizeof(v));
1015 }
1016
1017 static int compat_standard_to_user(void __user *dst, const void *src)
1018 {
1019 compat_int_t cv = *(int *)src;
1020
1021 if (cv > 0)
1022 cv -= xt_compat_calc_jump(AF_INET6, cv);
1023 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1024 }
1025
/* Compute, for one rule, how much smaller its compat (32-bit)
 * representation is than the native one, record that delta with
 * xt_compat_add_offset(), shrink newinfo->size by it, and shift every
 * hook entry/underflow offset that lies after this rule.
 * Returns 0 or a negative errno. */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	/* Delta of the fixed header, plus each match's and the target's
	 * own native-vs-compat size difference. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	/* Hooks located after this entry move down by the delta. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1056
/* Build the compat-sized view of @info in @newinfo: copy the metadata,
 * then walk every rule accumulating the compat size/offset deltas.
 * Caller must hold the compat lock (offsets are stashed globally per
 * address family).  Returns 0 or a negative errno. */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ip6t_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_compat_init_offsets(AF_INET6, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
1079 #endif
1080
/* IP6T_SO_GET_INFO handler: look up the named table (auto-loading its
 * module if needed) and copy its metadata (hook entries, underflows,
 * entry count, size) to userspace.  In compat mode the sizes/offsets
 * are first converted to the 32-bit layout under the compat lock.
 * Returns 0 or a negative errno. */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Ensure NUL termination of the user-supplied name. */
	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET6);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			/* Report compat-converted sizes/offsets instead. */
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
			private = &tmp;
		}
#endif
		/* memset avoids leaking kernel stack padding to user. */
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET6);
#endif
	return ret;
}
1141
/* IP6T_SO_GET_ENTRIES handler: validate the user-supplied request
 * header, look up the table, and copy all rules to userspace via
 * copy_entries_to_user().  get.size must match the table's current
 * size exactly (-EAGAIN tells userspace to re-query and retry).
 * Returns 0 or a negative errno. */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	/* Total length must be header plus exactly the claimed size. */
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1181
/*
 * Common back-end for do_replace() and compat_do_replace(): swap the
 * table's ruleset for @newinfo, copy the displaced ruleset's counters
 * out to @counters_ptr, and free the old ruleset.
 *
 * On success returns 0 and takes ownership of @newinfo.  On failure the
 * caller still owns @newinfo and must free it.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;
	struct ip6t_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* The module holds one extra reference while a table contains
	 * non-initial rules.  The two conditional puts below adjust for
	 * every combination of old/new table exceeding initial_entries.
	 */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1256
1257 static int
1258 do_replace(struct net *net, const void __user *user, unsigned int len)
1259 {
1260 int ret;
1261 struct ip6t_replace tmp;
1262 struct xt_table_info *newinfo;
1263 void *loc_cpu_entry;
1264 struct ip6t_entry *iter;
1265
1266 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1267 return -EFAULT;
1268
1269 /* overflow check */
1270 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1271 return -ENOMEM;
1272 tmp.name[sizeof(tmp.name)-1] = 0;
1273
1274 newinfo = xt_alloc_table_info(tmp.size);
1275 if (!newinfo)
1276 return -ENOMEM;
1277
1278 /* choose the copy that is on our node/cpu */
1279 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1280 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1281 tmp.size) != 0) {
1282 ret = -EFAULT;
1283 goto free_newinfo;
1284 }
1285
1286 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1287 if (ret != 0)
1288 goto free_newinfo;
1289
1290 duprintf("ip_tables: Translated table\n");
1291
1292 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1293 tmp.num_counters, tmp.counters);
1294 if (ret)
1295 goto free_newinfo_untrans;
1296 return 0;
1297
1298 free_newinfo_untrans:
1299 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1300 cleanup_entry(iter, net);
1301 free_newinfo:
1302 xt_free_table_info(newinfo);
1303 return ret;
1304 }
1305
1306 static int
1307 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1308 int compat)
1309 {
1310 unsigned int i, curcpu;
1311 struct xt_counters_info tmp;
1312 struct xt_counters *paddc;
1313 unsigned int num_counters;
1314 char *name;
1315 int size;
1316 void *ptmp;
1317 struct xt_table *t;
1318 const struct xt_table_info *private;
1319 int ret = 0;
1320 const void *loc_cpu_entry;
1321 struct ip6t_entry *iter;
1322 unsigned int addend;
1323 #ifdef CONFIG_COMPAT
1324 struct compat_xt_counters_info compat_tmp;
1325
1326 if (compat) {
1327 ptmp = &compat_tmp;
1328 size = sizeof(struct compat_xt_counters_info);
1329 } else
1330 #endif
1331 {
1332 ptmp = &tmp;
1333 size = sizeof(struct xt_counters_info);
1334 }
1335
1336 if (copy_from_user(ptmp, user, size) != 0)
1337 return -EFAULT;
1338
1339 #ifdef CONFIG_COMPAT
1340 if (compat) {
1341 num_counters = compat_tmp.num_counters;
1342 name = compat_tmp.name;
1343 } else
1344 #endif
1345 {
1346 num_counters = tmp.num_counters;
1347 name = tmp.name;
1348 }
1349
1350 if (len != size + num_counters * sizeof(struct xt_counters))
1351 return -EINVAL;
1352
1353 paddc = vmalloc(len - size);
1354 if (!paddc)
1355 return -ENOMEM;
1356
1357 if (copy_from_user(paddc, user + size, len - size) != 0) {
1358 ret = -EFAULT;
1359 goto free;
1360 }
1361
1362 t = xt_find_table_lock(net, AF_INET6, name);
1363 if (IS_ERR_OR_NULL(t)) {
1364 ret = t ? PTR_ERR(t) : -ENOENT;
1365 goto free;
1366 }
1367
1368
1369 local_bh_disable();
1370 private = t->private;
1371 if (private->number != num_counters) {
1372 ret = -EINVAL;
1373 goto unlock_up_free;
1374 }
1375
1376 i = 0;
1377 /* Choose the copy that is on our node */
1378 curcpu = smp_processor_id();
1379 addend = xt_write_recseq_begin();
1380 loc_cpu_entry = private->entries[curcpu];
1381 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1382 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1383 ++i;
1384 }
1385 xt_write_recseq_end(addend);
1386
1387 unlock_up_free:
1388 local_bh_enable();
1389 xt_table_unlock(t);
1390 module_put(t->me);
1391 free:
1392 vfree(paddc);
1393
1394 return ret;
1395 }
1396
1397 #ifdef CONFIG_COMPAT
/* 32-bit userspace image of struct ip6t_replace: same layout except the
 * counters pointer is a compat_uptr_t and the trailing entries use the
 * compat entry layout.
 */
struct compat_ip6t_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];
};
1409
/*
 * Convert one kernel ip6t_entry to the 32-bit compat layout and copy it
 * to userspace at *dstptr, merging in counters[i].  *dstptr and *size
 * are advanced/shrunk as the compat form is smaller; the entry's
 * target_offset/next_offset are rewritten to compat-space offsets.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	/* Each match/target conversion further shrinks *size by its own
	 * native-vs-compat delta.
	 */
	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	/* Patch the offsets last, once the total shrinkage is known. */
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1448
/*
 * Resolve the match named in @m (loading its module if needed), stash
 * the kernel match in m->u.kernel.match, and add its native-vs-compat
 * size delta to *size.  @name, @ipv6 and @hookmask are currently
 * unused here; validation happens later in compat_check_entry().
 */
static int
compat_find_calc_match(struct xt_entry_match *m,
		       const char *name,
		       const struct ip6t_ip6 *ipv6,
		       unsigned int hookmask,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}
1469
/* Drop the module references taken by check_compat_entry_size_and_hooks()
 * for every match and the target of @e.  Used on error unwind before the
 * entries have been ->check'ed (no ->destroy needed yet).
 */
static void compat_release_entry(struct compat_ip6t_entry *e)
{
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ip6t_get_target(e);
	module_put(t->u.kernel.target->me);
}
1481
/*
 * First-pass validation of one compat entry: bounds/alignment checks,
 * match/target module resolution, size-delta accounting for the later
 * compat->native expansion, and hook/underflow recording.  On success
 * the entry holds module references (released via compat_release_entry()
 * on later failure); *size grows by the entry's expansion delta.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	/* Reject misaligned entries and entries whose header or claimed
	 * extent runs past the end of the blob.
	 */
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	/* Minimum room for the entry header plus a target header. */
	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e);
	if (ret)
		return ret;

	/* Running total of how much this entry grows when converted to
	 * the native layout: fixed header delta plus per-match/target
	 * deltas.
	 */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ipv6, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	/* Record this entry's offset delta so later passes can remap
	 * compat offsets to native ones.
	 */
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

 out:
	module_put(t->u.kernel.target->me);
 release_matches:
	/* Release only the j matches that were successfully resolved. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1570
/*
 * Second-pass conversion of one validated compat entry into the native
 * layout at *dstptr.  *size grows by the expansion delta; the entry's
 * target/next offsets and any hook_entry/underflow values that lie
 * beyond this entry are shifted accordingly.  Matches/targets were
 * already resolved in pass one.
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is negative growth: offsets move forward. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Any hook/underflow offset past this entry shifts by the same
	 * amount the entry grew.
	 */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1609
/*
 * Run the ->checkentry hooks for every match and the target of an
 * already-converted (native layout) entry.  On failure, cleanup_match()
 * is invoked for exactly the matches whose check succeeded.
 */
static int compat_check_entry(struct ip6t_entry *e, struct net *net,
			      const char *name)
{
	unsigned int j;
	int ret = 0;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	/* Unwind only the first j matches (those already checked). */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
1644
/*
 * Translate a compat (32-bit layout) ruleset blob into a fresh native
 * xt_table_info.  Two passes under xt_compat_lock: pass one validates
 * sizes/hooks and records per-entry offset deltas; pass two expands
 * each entry into the new table.  After unlock, chains are loop-checked
 * and per-entry ->check hooks run.  On success *pinfo/*pentry0 are
 * replaced with the new table (old info freed); on failure everything
 * allocated or referenced here is unwound.
 */
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET6);
	xt_compat_init_offsets(AF_INET6, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		/* j counts entries holding module refs, for unwind. */
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	/* "size" now includes every entry's compat->native growth. */
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	/* Offset table no longer needed once expansion is done. */
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ip6t_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	/* Drop module refs held by the first j compat entries. */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1802
1803 static int
1804 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1805 {
1806 int ret;
1807 struct compat_ip6t_replace tmp;
1808 struct xt_table_info *newinfo;
1809 void *loc_cpu_entry;
1810 struct ip6t_entry *iter;
1811
1812 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1813 return -EFAULT;
1814
1815 /* overflow check */
1816 if (tmp.size >= INT_MAX / num_possible_cpus())
1817 return -ENOMEM;
1818 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1819 return -ENOMEM;
1820 tmp.name[sizeof(tmp.name)-1] = 0;
1821
1822 newinfo = xt_alloc_table_info(tmp.size);
1823 if (!newinfo)
1824 return -ENOMEM;
1825
1826 /* choose the copy that is on our node/cpu */
1827 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1828 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1829 tmp.size) != 0) {
1830 ret = -EFAULT;
1831 goto free_newinfo;
1832 }
1833
1834 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1835 &newinfo, &loc_cpu_entry, tmp.size,
1836 tmp.num_entries, tmp.hook_entry,
1837 tmp.underflow);
1838 if (ret != 0)
1839 goto free_newinfo;
1840
1841 duprintf("compat_do_replace: Translated table\n");
1842
1843 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1844 tmp.num_counters, compat_ptr(tmp.counters));
1845 if (ret)
1846 goto free_newinfo_untrans;
1847 return 0;
1848
1849 free_newinfo_untrans:
1850 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1851 cleanup_entry(iter, net);
1852 free_newinfo:
1853 xt_free_table_info(newinfo);
1854 return ret;
1855 }
1856
1857 static int
1858 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1859 unsigned int len)
1860 {
1861 int ret;
1862
1863 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1864 return -EPERM;
1865
1866 switch (cmd) {
1867 case IP6T_SO_SET_REPLACE:
1868 ret = compat_do_replace(sock_net(sk), user, len);
1869 break;
1870
1871 case IP6T_SO_SET_ADD_COUNTERS:
1872 ret = do_add_counters(sock_net(sk), user, len, 1);
1873 break;
1874
1875 default:
1876 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1877 ret = -EINVAL;
1878 }
1879
1880 return ret;
1881 }
1882
/* 32-bit userspace image of struct ip6t_get_entries: header naming the
 * table and claimed size, followed by compat-layout entries.
 */
struct compat_ip6t_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ip6t_entry entrytable[0];
};
1888
/*
 * Copy the whole ruleset of @table to 32-bit userspace at @userptr,
 * converting each entry with compat_copy_entry_to_user() and merging in
 * a freshly aggregated counters snapshot.
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ip6t_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
1923
1924 static int
1925 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1926 int *len)
1927 {
1928 int ret;
1929 struct compat_ip6t_get_entries get;
1930 struct xt_table *t;
1931
1932 if (*len < sizeof(get)) {
1933 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1934 return -EINVAL;
1935 }
1936
1937 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1938 return -EFAULT;
1939
1940 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1941 duprintf("compat_get_entries: %u != %zu\n",
1942 *len, sizeof(get) + get.size);
1943 return -EINVAL;
1944 }
1945
1946 xt_compat_lock(AF_INET6);
1947 t = xt_find_table_lock(net, AF_INET6, get.name);
1948 if (!IS_ERR_OR_NULL(t)) {
1949 const struct xt_table_info *private = t->private;
1950 struct xt_table_info info;
1951 duprintf("t->private->number = %u\n", private->number);
1952 ret = compat_table_info(private, &info);
1953 if (!ret && get.size == info.size) {
1954 ret = compat_copy_entries_to_user(private->size,
1955 t, uptr->entrytable);
1956 } else if (!ret) {
1957 duprintf("compat_get_entries: I've got %u not %u!\n",
1958 private->size, get.size);
1959 ret = -EAGAIN;
1960 }
1961 xt_compat_flush_offsets(AF_INET6);
1962 module_put(t->me);
1963 xt_table_unlock(t);
1964 } else
1965 ret = t ? PTR_ERR(t) : -ENOENT;
1966
1967 xt_compat_unlock(AF_INET6);
1968 return ret;
1969 }
1970
1971 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1972
1973 static int
1974 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1975 {
1976 int ret;
1977
1978 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1979 return -EPERM;
1980
1981 switch (cmd) {
1982 case IP6T_SO_GET_INFO:
1983 ret = get_info(sock_net(sk), user, len, 1);
1984 break;
1985 case IP6T_SO_GET_ENTRIES:
1986 ret = compat_get_entries(sock_net(sk), user, len);
1987 break;
1988 default:
1989 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1990 }
1991 return ret;
1992 }
1993 #endif
1994
1995 static int
1996 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1997 {
1998 int ret;
1999
2000 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2001 return -EPERM;
2002
2003 switch (cmd) {
2004 case IP6T_SO_SET_REPLACE:
2005 ret = do_replace(sock_net(sk), user, len);
2006 break;
2007
2008 case IP6T_SO_SET_ADD_COUNTERS:
2009 ret = do_add_counters(sock_net(sk), user, len, 0);
2010 break;
2011
2012 default:
2013 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2014 ret = -EINVAL;
2015 }
2016
2017 return ret;
2018 }
2019
/*
 * Native getsockopt dispatcher: GET_INFO/GET_ENTRIES return table
 * metadata and rules; GET_REVISION_MATCH/TARGET probe whether a given
 * match/target revision is available, loading its module if necessary.
 * Requires CAP_NET_ADMIN in the socket's user namespace.
 */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* Userspace name may lack a terminator; enforce one. */
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* xt_find_revision() fills "ret" with the best revision
		 * found (or an errno); retry once after module load.
		 */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
2071
/*
 * Register an ip6tables table for @net from the initial ruleset @repl.
 * Translates the ruleset into a fresh xt_table_info and hands it to
 * xt_register_table().  Returns the registered table or an ERR_PTR.
 */
struct xt_table *ip6t_register_table(struct net *net,
				     const struct xt_table *table,
				     const struct ip6t_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	/* Placeholder initial info; xt_register_table() swaps in newinfo. */
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
2108
/*
 * Unregister @table from @net and tear down its ruleset: run each
 * entry's cleanup, drop the extra module reference held for
 * non-initial rules, and free the table info.
 */
void ip6t_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	/* table->me may be unreachable after xt_unregister_table(). */
	struct module *table_owner = table->me;
	struct ip6t_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
2126
/* Returns true when @type/@code fall inside the configured range
 * (exact type, code within [min_code, max_code]), XOR'ed with @invert.
 */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool in_range;

	in_range = (type == test_type) &&
		   (code >= min_code) && (code <= max_code);
	return invert ? !in_range : in_range;
}
2136
/*
 * Match function for the built-in "icmp6" match: test the packet's
 * ICMPv6 type/code against the rule's configured range.  Non-first
 * fragments never match; an ICMPv6 header too short to read causes
 * the packet to be hot-dropped.
 */
static bool
icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2164
2165 /* Called when user tries to insert an entry of this type. */
2166 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2167 {
2168 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2169
2170 /* Must specify no unknown invflags */
2171 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2172 }
2173
/* The built-in targets: standard (NULL) and error.  The standard
 * target carries just a verdict int (with compat conversion helpers);
 * the error target carries an error name and fires ip6t_error().
 */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = ip6t_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV6,
	},
};
2193
/* Socket-option registration: wires the IP6T_SO_SET_*/IP6T_SO_GET_*
 * command ranges on PF_INET6 sockets to the dispatchers above, with
 * separate entry points for 32-bit (compat) callers.
 */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2210
/* The built-in "icmp6" match: restricted to IPPROTO_ICMPV6 packets. */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp6",
		.match      = icmp6_match,
		.matchsize  = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto      = IPPROTO_ICMPV6,
		.family     = NFPROTO_IPV6,
	},
};
2221
/* Per-netns setup: initialise the IPv6 xt proto state for @net. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
2226
/* Per-netns teardown: release the IPv6 xt proto state for @net. */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
2231
/* Network-namespace lifecycle hooks for this module. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2236
/*
 * Module init: register the pernet hooks, the built-in targets and
 * matches, and finally the sockopt interface.  Each failure label
 * unwinds exactly the registrations completed before it.
 */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
err4:
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}
2270
/* Module exit: unregister everything in reverse order of init. */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2279
/* Public API for table providers (ip6table_filter etc.). */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);