1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
36
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
40
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
43 #else
44 #define dprintf(format, args...)
45 #endif
46
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
49 #else
50 #define duprintf(format, args...)
51 #endif
52
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
55 do { \
56 if (!(x)) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
59 } while(0)
60 #else
61 #define IP_NF_ASSERT(x)
62 #endif
63
64 #if 0
65 /* All the better to debug you with... */
66 #define static
67 #define inline
68 #endif
69
70 /*
71 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read
75 the counters or update the rules.
76
77    Hence the start of any table is given by get_entry() below.  */
78
79 /* Check for an extension */
80 int
81 ip6t_ext_hdr(u8 nexthdr)
82 {
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
90 }
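/*
 * Illustrative sketch only, kept out of the build: one way a caller might
 * use ip6t_ext_hdr() to walk past extension headers.  The ipv6_opt_hdr
 * access below is a simplifying assumption for the example (IPPROTO_NONE,
 * ESP, AH and fragment headers would need extra care); the real traversal
 * in this file is done by ipv6_find_hdr().
 */
#if 0
static int example_last_nexthdr(const struct sk_buff *skb)
{
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	unsigned int off = sizeof(struct ipv6hdr);

	while (ip6t_ext_hdr(nexthdr)) {
		struct ipv6_opt_hdr _hdr;
		const struct ipv6_opt_hdr *hp;

		/* read the next extension header's fixed part */
		hp = skb_header_pointer(skb, off, sizeof(_hdr), &_hdr);
		if (hp == NULL)
			return -1;
		nexthdr = hp->nexthdr;
		off += ipv6_optlen(hp);
	}
	return nexthdr;
}
#endif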
91
92 /* Returns whether the packet matches the rule or not. */
93 /* Performance critical - called for every packet */
94 static inline bool
95 ip6_packet_match(const struct sk_buff *skb,
96 const char *indev,
97 const char *outdev,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
101 {
102 unsigned long ret;
103 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
104
105 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
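/*
 * FWINV(cond, flg) yields 'cond' unchanged unless the rule carries the
 * corresponding IP6T_INV_* flag, in which case the result is inverted.
 * E.g. FWINV(src_mismatch, IP6T_INV_SRCIP) is true exactly when the rule
 * should fail on the source address: the address differs from a plain
 * "-s" rule, or matches the (masked) address of an inverted "! -s" rule.
 */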
106
107 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
108 &ip6info->src), IP6T_INV_SRCIP)
109 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
110 &ip6info->dst), IP6T_INV_DSTIP)) {
111 dprintf("Source or dest mismatch.\n");
112 /*
113 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
114 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
115 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
116 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
117 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
118 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
119 return false;
120 }
121
122 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
123
124 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
125 dprintf("VIA in mismatch (%s vs %s).%s\n",
126 indev, ip6info->iniface,
127 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
128 return false;
129 }
130
131 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
132
133 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
134 dprintf("VIA out mismatch (%s vs %s).%s\n",
135 outdev, ip6info->outiface,
136 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
137 return false;
138 }
139
140 /* ... might want to do something with class and flowlabel here ... */
141
142 /* look for the desired protocol header */
143 if((ip6info->flags & IP6T_F_PROTO)) {
144 int protohdr;
145 unsigned short _frag_off;
146
147 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
148 if (protohdr < 0) {
149 if (_frag_off == 0)
150 *hotdrop = true;
151 return false;
152 }
153 *fragoff = _frag_off;
154
155 dprintf("Packet protocol %hi ?= %s%hi.\n",
156 protohdr,
157 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
158 ip6info->proto);
159
160 if (ip6info->proto == protohdr) {
161 if(ip6info->invflags & IP6T_INV_PROTO) {
162 return false;
163 }
164 return true;
165 }
166
167 		/* We need a match for the '-p all' case, too! */
168 if ((ip6info->proto != 0) &&
169 !(ip6info->invflags & IP6T_INV_PROTO))
170 return false;
171 }
172 return true;
173 }
174
175 /* should be ip6 safe */
176 static bool
177 ip6_checkentry(const struct ip6t_ip6 *ipv6)
178 {
179 if (ipv6->flags & ~IP6T_F_MASK) {
180 duprintf("Unknown flag bits set: %08X\n",
181 ipv6->flags & ~IP6T_F_MASK);
182 return false;
183 }
184 if (ipv6->invflags & ~IP6T_INV_MASK) {
185 duprintf("Unknown invflag bits set: %08X\n",
186 ipv6->invflags & ~IP6T_INV_MASK);
187 return false;
188 }
189 return true;
190 }
191
192 static unsigned int
193 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
194 {
195 if (net_ratelimit())
196 printk("ip6_tables: error: `%s'\n",
197 (const char *)par->targinfo);
198
199 return NF_DROP;
200 }
201
202 /* Performance critical - called for every packet */
203 static inline bool
204 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
205 struct xt_match_param *par)
206 {
207 par->match = m->u.kernel.match;
208 par->matchinfo = m->data;
209
210 	/* Stop iteration if it doesn't match */
211 	return !m->u.kernel.match->match(skb, par);
215 }
216
217 static inline struct ip6t_entry *
218 get_entry(void *base, unsigned int offset)
219 {
220 return (struct ip6t_entry *)(base + offset);
221 }
222
223 /* All zeroes == unconditional rule. */
224 /* Mildly perf critical (only if packet tracing is on) */
225 static inline int
226 unconditional(const struct ip6t_ip6 *ipv6)
227 {
228 unsigned int i;
229
230 for (i = 0; i < sizeof(*ipv6); i++)
231 if (((char *)ipv6)[i])
232 break;
233
234 return (i == sizeof(*ipv6));
235 }
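/*
 * Concrete case: the policy entry at the tail of a built-in chain and the
 * implicit RETURN at the end of a user-defined chain carry an all-zero
 * ip6t_ip6 part, so unconditional() is true for them; the tracing and
 * loop-checking code below keys on exactly that.
 */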
236
237 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
238 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
239 /* This cries for unification! */
240 static const char *const hooknames[] = {
241 [NF_INET_PRE_ROUTING] = "PREROUTING",
242 [NF_INET_LOCAL_IN] = "INPUT",
243 [NF_INET_FORWARD] = "FORWARD",
244 [NF_INET_LOCAL_OUT] = "OUTPUT",
245 [NF_INET_POST_ROUTING] = "POSTROUTING",
246 };
247
248 enum nf_ip_trace_comments {
249 NF_IP6_TRACE_COMMENT_RULE,
250 NF_IP6_TRACE_COMMENT_RETURN,
251 NF_IP6_TRACE_COMMENT_POLICY,
252 };
253
254 static const char *const comments[] = {
255 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
256 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
257 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
258 };
259
260 static struct nf_loginfo trace_loginfo = {
261 .type = NF_LOG_TYPE_LOG,
262 .u = {
263 .log = {
264 .level = 4,
265 .logflags = NF_LOG_MASK,
266 },
267 },
268 };
269
270 /* Mildly perf critical (only if packet tracing is on) */
271 static inline int
272 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
273 char *hookname, char **chainname,
274 char **comment, unsigned int *rulenum)
275 {
276 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
277
278 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
279 /* Head of user chain: ERROR target with chainname */
280 *chainname = t->target.data;
281 (*rulenum) = 0;
282 } else if (s == e) {
283 (*rulenum)++;
284
285 if (s->target_offset == sizeof(struct ip6t_entry)
286 && strcmp(t->target.u.kernel.target->name,
287 IP6T_STANDARD_TARGET) == 0
288 && t->verdict < 0
289 && unconditional(&s->ipv6)) {
290 /* Tail of chains: STANDARD target (return/policy) */
291 *comment = *chainname == hookname
292 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
293 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
294 }
295 return 1;
296 } else
297 (*rulenum)++;
298
299 return 0;
300 }
301
302 static void trace_packet(struct sk_buff *skb,
303 unsigned int hook,
304 const struct net_device *in,
305 const struct net_device *out,
306 const char *tablename,
307 struct xt_table_info *private,
308 struct ip6t_entry *e)
309 {
310 void *table_base;
311 const struct ip6t_entry *root;
312 char *hookname, *chainname, *comment;
313 unsigned int rulenum = 0;
314
315 table_base = (void *)private->entries[smp_processor_id()];
316 root = get_entry(table_base, private->hook_entry[hook]);
317
318 hookname = chainname = (char *)hooknames[hook];
319 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
320
321 IP6T_ENTRY_ITERATE(root,
322 private->size - private->hook_entry[hook],
323 get_chainname_rulenum,
324 e, hookname, &chainname, &comment, &rulenum);
325
326 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
327 "TRACE: %s:%s:%s:%u ",
328 tablename, chainname, comment, rulenum);
329 }
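/*
 * Example of the resulting log line, assuming a packet that ends up being
 * handled by the INPUT policy of the "filter" table as the 2nd rule
 * traversed:
 *
 *	TRACE: filter:INPUT:policy:2 <usual nf_log packet dump>
 */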
330 #endif
331
332 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
333 unsigned int
334 ip6t_do_table(struct sk_buff *skb,
335 unsigned int hook,
336 const struct net_device *in,
337 const struct net_device *out,
338 struct xt_table *table)
339 {
340 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
341 bool hotdrop = false;
342 /* Initializing verdict to NF_DROP keeps gcc happy. */
343 unsigned int verdict = NF_DROP;
344 const char *indev, *outdev;
345 void *table_base;
346 struct ip6t_entry *e, *back;
347 struct xt_table_info *private;
348 struct xt_match_param mtpar;
349 struct xt_target_param tgpar;
350
351 /* Initialization */
352 indev = in ? in->name : nulldevname;
353 outdev = out ? out->name : nulldevname;
354 /* We handle fragments by dealing with the first fragment as
355 * if it was a normal packet. All other fragments are treated
356 	 * normally, except that they will NEVER match rules that ask for
357 	 * things we don't know (ie. tcp syn flag or ports).  If the
358 * rule is also a fragment-specific rule, non-fragments won't
359 * match it. */
360 mtpar.hotdrop = &hotdrop;
361 mtpar.in = tgpar.in = in;
362 mtpar.out = tgpar.out = out;
363 mtpar.family = tgpar.family = NFPROTO_IPV6;
364 tgpar.hooknum = hook;
365
366 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
367
368 rcu_read_lock_bh();
369 private = rcu_dereference(table->private);
370 table_base = rcu_dereference(private->entries[smp_processor_id()]);
371
372 e = get_entry(table_base, private->hook_entry[hook]);
373
374 /* For return from builtin chain */
375 back = get_entry(table_base, private->underflow[hook]);
376
377 do {
378 IP_NF_ASSERT(e);
379 IP_NF_ASSERT(back);
380 if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
381 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
382 struct ip6t_entry_target *t;
383
384 if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
385 goto no_match;
386
387 ADD_COUNTER(e->counters,
388 ntohs(ipv6_hdr(skb)->payload_len) +
389 sizeof(struct ipv6hdr), 1);
390
391 t = ip6t_get_target(e);
392 IP_NF_ASSERT(t->u.kernel.target);
393
394 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
395 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
396 /* The packet is traced: log it */
397 if (unlikely(skb->nf_trace))
398 trace_packet(skb, hook, in, out,
399 table->name, private, e);
400 #endif
401 /* Standard target? */
402 if (!t->u.kernel.target->target) {
403 int v;
404
405 v = ((struct ip6t_standard_target *)t)->verdict;
406 if (v < 0) {
407 /* Pop from stack? */
408 if (v != IP6T_RETURN) {
409 verdict = (unsigned)(-v) - 1;
410 break;
411 }
412 e = back;
413 back = get_entry(table_base,
414 back->comefrom);
415 continue;
416 }
417 if (table_base + v != (void *)e + e->next_offset
418 && !(e->ipv6.flags & IP6T_F_GOTO)) {
419 /* Save old back ptr in next entry */
420 struct ip6t_entry *next
421 = (void *)e + e->next_offset;
422 next->comefrom
423 = (void *)back - table_base;
424 /* set back pointer to next entry */
425 back = next;
426 }
427
428 e = get_entry(table_base, v);
429 } else {
430 /* Targets which reenter must return
431 abs. verdicts */
432 tgpar.target = t->u.kernel.target;
433 tgpar.targinfo = t->data;
434
435 #ifdef CONFIG_NETFILTER_DEBUG
436 ((struct ip6t_entry *)table_base)->comefrom
437 = 0xeeeeeeec;
438 #endif
439 verdict = t->u.kernel.target->target(skb,
440 &tgpar);
441
442 #ifdef CONFIG_NETFILTER_DEBUG
443 if (((struct ip6t_entry *)table_base)->comefrom
444 != 0xeeeeeeec
445 && verdict == IP6T_CONTINUE) {
446 printk("Target %s reentered!\n",
447 t->u.kernel.target->name);
448 verdict = NF_DROP;
449 }
450 ((struct ip6t_entry *)table_base)->comefrom
451 = 0x57acc001;
452 #endif
453 if (verdict == IP6T_CONTINUE)
454 e = (void *)e + e->next_offset;
455 else
456 /* Verdict */
457 break;
458 }
459 } else {
460
461 no_match:
462 e = (void *)e + e->next_offset;
463 }
464 } while (!hotdrop);
465
466 #ifdef CONFIG_NETFILTER_DEBUG
467 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
468 #endif
469 rcu_read_unlock_bh();
470
471 #ifdef DEBUG_ALLOW_ALL
472 return NF_ACCEPT;
473 #else
474 if (hotdrop)
475 return NF_DROP;
476 else return verdict;
477 #endif
478 }
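/*
 * Minimal sketch, compiled out, of how a table module feeds packets into
 * ip6t_do_table() from its netfilter hook.  The ip6table_filter pointer in
 * struct netns_ipv6 is borrowed from the filter table module and is an
 * assumption of the example; any xt_table registered through
 * ip6t_register_table() is used the same way.
 */
#if 0
static unsigned int
example_ip6t_hook(unsigned int hook, struct sk_buff *skb,
		  const struct net_device *in, const struct net_device *out,
		  int (*okfn)(struct sk_buff *))
{
	return ip6t_do_table(skb, hook, in, out,
			     dev_net(in)->ipv6.ip6table_filter);
}
#endif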
479
480 /* Figures out from what hook each rule can be called: returns 0 if
481 there are loops. Puts hook bitmask in comefrom. */
482 static int
483 mark_source_chains(struct xt_table_info *newinfo,
484 unsigned int valid_hooks, void *entry0)
485 {
486 unsigned int hook;
487
488 /* No recursion; use packet counter to save back ptrs (reset
489 to 0 as we leave), and comefrom to save source hook bitmask */
490 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
491 unsigned int pos = newinfo->hook_entry[hook];
492 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
493
494 if (!(valid_hooks & (1 << hook)))
495 continue;
496
497 /* Set initial back pointer. */
498 e->counters.pcnt = pos;
499
500 for (;;) {
501 struct ip6t_standard_target *t
502 = (void *)ip6t_get_target(e);
503 int visited = e->comefrom & (1 << hook);
504
505 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
506 				printk("ip6_tables: loop hook %u pos %u %08X.\n",
507 hook, pos, e->comefrom);
508 return 0;
509 }
510 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
511
512 /* Unconditional return/END. */
513 if ((e->target_offset == sizeof(struct ip6t_entry)
514 && (strcmp(t->target.u.user.name,
515 IP6T_STANDARD_TARGET) == 0)
516 && t->verdict < 0
517 && unconditional(&e->ipv6)) || visited) {
518 unsigned int oldpos, size;
519
520 if ((strcmp(t->target.u.user.name,
521 IP6T_STANDARD_TARGET) == 0) &&
522 t->verdict < -NF_MAX_VERDICT - 1) {
523 duprintf("mark_source_chains: bad "
524 "negative verdict (%i)\n",
525 t->verdict);
526 return 0;
527 }
528
529 /* Return: backtrack through the last
530 big jump. */
531 do {
532 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
533 #ifdef DEBUG_IP_FIREWALL_USER
534 if (e->comefrom
535 & (1 << NF_INET_NUMHOOKS)) {
536 duprintf("Back unset "
537 "on hook %u "
538 "rule %u\n",
539 hook, pos);
540 }
541 #endif
542 oldpos = pos;
543 pos = e->counters.pcnt;
544 e->counters.pcnt = 0;
545
546 /* We're at the start. */
547 if (pos == oldpos)
548 goto next;
549
550 e = (struct ip6t_entry *)
551 (entry0 + pos);
552 } while (oldpos == pos + e->next_offset);
553
554 /* Move along one */
555 size = e->next_offset;
556 e = (struct ip6t_entry *)
557 (entry0 + pos + size);
558 e->counters.pcnt = pos;
559 pos += size;
560 } else {
561 int newpos = t->verdict;
562
563 if (strcmp(t->target.u.user.name,
564 IP6T_STANDARD_TARGET) == 0
565 && newpos >= 0) {
566 if (newpos > newinfo->size -
567 sizeof(struct ip6t_entry)) {
568 duprintf("mark_source_chains: "
569 "bad verdict (%i)\n",
570 newpos);
571 return 0;
572 }
573 					/* This is a jump; chase it. */
574 duprintf("Jump rule %u -> %u\n",
575 pos, newpos);
576 } else {
577 /* ... this is a fallthru */
578 newpos = pos + e->next_offset;
579 }
580 e = (struct ip6t_entry *)
581 (entry0 + newpos);
582 e->counters.pcnt = pos;
583 pos = newpos;
584 }
585 }
586 next:
587 duprintf("Finished chain %u\n", hook);
588 }
589 return 1;
590 }
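/*
 * Worked example of the walk above, assuming this layout:
 *
 *	INPUT:	rule0 (jump to "foo"), rule1 (policy)
 *	foo:	rule2, rule3 (implicit RETURN)
 *
 * Starting at rule0 the jump is chased into "foo" (the back pointer is
 * parked in the target entry's counters.pcnt), rule2 falls through to
 * rule3, the unconditional RETURN triggers the backtrack loop, which
 * unwinds to rule0 and then moves along one entry to rule1.  Every entry
 * visited gets the INPUT hook bit set in comefrom.
 */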
591
592 static int
593 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
594 {
595 struct xt_mtdtor_param par;
596
597 if (i && (*i)-- == 0)
598 return 1;
599
600 par.match = m->u.kernel.match;
601 par.matchinfo = m->data;
602 par.family = NFPROTO_IPV6;
603 if (par.match->destroy != NULL)
604 par.match->destroy(&par);
605 module_put(par.match->me);
606 return 0;
607 }
608
609 static int
610 check_entry(struct ip6t_entry *e, const char *name)
611 {
612 struct ip6t_entry_target *t;
613
614 if (!ip6_checkentry(&e->ipv6)) {
615 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
616 return -EINVAL;
617 }
618
619 if (e->target_offset + sizeof(struct ip6t_entry_target) >
620 e->next_offset)
621 return -EINVAL;
622
623 t = ip6t_get_target(e);
624 if (e->target_offset + t->u.target_size > e->next_offset)
625 return -EINVAL;
626
627 return 0;
628 }
629
630 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
631 unsigned int *i)
632 {
633 const struct ip6t_ip6 *ipv6 = par->entryinfo;
634 int ret;
635
636 par->match = m->u.kernel.match;
637 par->matchinfo = m->data;
638
639 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
640 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
641 if (ret < 0) {
642 duprintf("ip_tables: check failed for `%s'.\n",
643 			 par->match->name);
644 return ret;
645 }
646 ++*i;
647 return 0;
648 }
649
650 static int
651 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
652 unsigned int *i)
653 {
654 struct xt_match *match;
655 int ret;
656
657 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
658 m->u.user.revision),
659 "ip6t_%s", m->u.user.name);
660 if (IS_ERR(match) || !match) {
661 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
662 return match ? PTR_ERR(match) : -ENOENT;
663 }
664 m->u.kernel.match = match;
665
666 ret = check_match(m, par, i);
667 if (ret)
668 goto err;
669
670 return 0;
671 err:
672 module_put(m->u.kernel.match->me);
673 return ret;
674 }
675
676 static int check_target(struct ip6t_entry *e, const char *name)
677 {
678 struct ip6t_entry_target *t = ip6t_get_target(e);
679 struct xt_tgchk_param par = {
680 .table = name,
681 .entryinfo = e,
682 .target = t->u.kernel.target,
683 .targinfo = t->data,
684 .hook_mask = e->comefrom,
685 .family = NFPROTO_IPV6,
686 };
687 int ret;
688
689 t = ip6t_get_target(e);
690 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
691 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
692 if (ret < 0) {
693 duprintf("ip_tables: check failed for `%s'.\n",
694 t->u.kernel.target->name);
695 return ret;
696 }
697 return 0;
698 }
699
700 static int
701 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
702 unsigned int *i)
703 {
704 struct ip6t_entry_target *t;
705 struct xt_target *target;
706 int ret;
707 unsigned int j;
708 struct xt_mtchk_param mtpar;
709
710 ret = check_entry(e, name);
711 if (ret)
712 return ret;
713
714 j = 0;
715 mtpar.table = name;
716 mtpar.entryinfo = &e->ipv6;
717 mtpar.hook_mask = e->comefrom;
718 mtpar.family = NFPROTO_IPV6;
719 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
720 if (ret != 0)
721 goto cleanup_matches;
722
723 t = ip6t_get_target(e);
724 target = try_then_request_module(xt_find_target(AF_INET6,
725 t->u.user.name,
726 t->u.user.revision),
727 "ip6t_%s", t->u.user.name);
728 if (IS_ERR(target) || !target) {
729 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
730 ret = target ? PTR_ERR(target) : -ENOENT;
731 goto cleanup_matches;
732 }
733 t->u.kernel.target = target;
734
735 ret = check_target(e, name);
736 if (ret)
737 goto err;
738
739 (*i)++;
740 return 0;
741 err:
742 module_put(t->u.kernel.target->me);
743 cleanup_matches:
744 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
745 return ret;
746 }
747
748 static int
749 check_entry_size_and_hooks(struct ip6t_entry *e,
750 struct xt_table_info *newinfo,
751 unsigned char *base,
752 unsigned char *limit,
753 const unsigned int *hook_entries,
754 const unsigned int *underflows,
755 unsigned int *i)
756 {
757 unsigned int h;
758
759 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
760 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
761 duprintf("Bad offset %p\n", e);
762 return -EINVAL;
763 }
764
765 if (e->next_offset
766 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
767 duprintf("checking: element %p size %u\n",
768 e, e->next_offset);
769 return -EINVAL;
770 }
771
772 /* Check hooks & underflows */
773 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
774 if ((unsigned char *)e - base == hook_entries[h])
775 newinfo->hook_entry[h] = hook_entries[h];
776 if ((unsigned char *)e - base == underflows[h])
777 newinfo->underflow[h] = underflows[h];
778 }
779
780 /* FIXME: underflows must be unconditional, standard verdicts
781 < 0 (not IP6T_RETURN). --RR */
782
783 /* Clear counters and comefrom */
784 e->counters = ((struct xt_counters) { 0, 0 });
785 e->comefrom = 0;
786
787 (*i)++;
788 return 0;
789 }
790
791 static int
792 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
793 {
794 struct xt_tgdtor_param par;
795 struct ip6t_entry_target *t;
796
797 if (i && (*i)-- == 0)
798 return 1;
799
800 /* Cleanup all matches */
801 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
802 t = ip6t_get_target(e);
803
804 par.target = t->u.kernel.target;
805 par.targinfo = t->data;
806 par.family = NFPROTO_IPV6;
807 if (par.target->destroy != NULL)
808 par.target->destroy(&par);
809 module_put(par.target->me);
810 return 0;
811 }
812
813 /* Checks and translates the user-supplied table segment (held in
814 newinfo) */
815 static int
816 translate_table(const char *name,
817 unsigned int valid_hooks,
818 struct xt_table_info *newinfo,
819 void *entry0,
820 unsigned int size,
821 unsigned int number,
822 const unsigned int *hook_entries,
823 const unsigned int *underflows)
824 {
825 unsigned int i;
826 int ret;
827
828 newinfo->size = size;
829 newinfo->number = number;
830
831 /* Init all hooks to impossible value. */
832 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
833 newinfo->hook_entry[i] = 0xFFFFFFFF;
834 newinfo->underflow[i] = 0xFFFFFFFF;
835 }
836
837 duprintf("translate_table: size %u\n", newinfo->size);
838 i = 0;
839 /* Walk through entries, checking offsets. */
840 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
841 check_entry_size_and_hooks,
842 newinfo,
843 entry0,
844 entry0 + size,
845 hook_entries, underflows, &i);
846 if (ret != 0)
847 return ret;
848
849 if (i != number) {
850 duprintf("translate_table: %u not %u entries\n",
851 i, number);
852 return -EINVAL;
853 }
854
855 /* Check hooks all assigned */
856 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
857 /* Only hooks which are valid */
858 if (!(valid_hooks & (1 << i)))
859 continue;
860 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
861 duprintf("Invalid hook entry %u %u\n",
862 i, hook_entries[i]);
863 return -EINVAL;
864 }
865 if (newinfo->underflow[i] == 0xFFFFFFFF) {
866 duprintf("Invalid underflow %u %u\n",
867 i, underflows[i]);
868 return -EINVAL;
869 }
870 }
871
872 if (!mark_source_chains(newinfo, valid_hooks, entry0))
873 return -ELOOP;
874
875 /* Finally, each sanity check must pass */
876 i = 0;
877 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
878 find_check_entry, name, size, &i);
879
880 if (ret != 0) {
881 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
882 cleanup_entry, &i);
883 return ret;
884 }
885
886 /* And one copy for every other CPU */
887 for_each_possible_cpu(i) {
888 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
889 memcpy(newinfo->entries[i], entry0, newinfo->size);
890 }
891
892 return ret;
893 }
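/*
 * For reference, the order of checks performed above: entry offsets and
 * hook/underflow bookkeeping (check_entry_size_and_hooks), loop detection
 * (mark_source_chains), per-entry match/target validation
 * (find_check_entry), and finally the verified blob is copied into every
 * other CPU's slot.
 */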
894
895 /* Gets counters. */
896 static inline int
897 add_entry_to_counter(const struct ip6t_entry *e,
898 struct xt_counters total[],
899 unsigned int *i)
900 {
901 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
902
903 (*i)++;
904 return 0;
905 }
906
907 static inline int
908 set_entry_to_counter(const struct ip6t_entry *e,
909 struct ip6t_counters total[],
910 unsigned int *i)
911 {
912 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
913
914 (*i)++;
915 return 0;
916 }
917
918 static void
919 get_counters(const struct xt_table_info *t,
920 struct xt_counters counters[])
921 {
922 unsigned int cpu;
923 unsigned int i;
924 unsigned int curcpu;
925
926 /* Instead of clearing (by a previous call to memset())
927 * the counters and using adds, we set the counters
928 	 * with data used by the 'current' CPU.
929 	 * We don't care about preemption here.
930 */
931 curcpu = raw_smp_processor_id();
932
933 i = 0;
934 IP6T_ENTRY_ITERATE(t->entries[curcpu],
935 t->size,
936 set_entry_to_counter,
937 counters,
938 &i);
939
940 for_each_possible_cpu(cpu) {
941 if (cpu == curcpu)
942 continue;
943 i = 0;
944 IP6T_ENTRY_ITERATE(t->entries[cpu],
945 t->size,
946 add_entry_to_counter,
947 counters,
948 &i);
949 }
950 }
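/*
 * Net effect of the two passes above: counters[] holds the sum over all
 * possible CPUs, e.g. per-CPU packet counts of 3 and 5 for the same rule
 * are reported as pcnt == 8.
 */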
951
952 /* We're lazy, and add to the first CPU; overflow works its fey magic
953 * and everything is OK. */
954 static int
955 add_counter_to_entry(struct ip6t_entry *e,
956 const struct xt_counters addme[],
957 unsigned int *i)
958 {
959 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
960
961 (*i)++;
962 return 0;
963 }
964
965 /* Take values from counters and add them back onto the current cpu */
966 static void put_counters(struct xt_table_info *t,
967 const struct xt_counters counters[])
968 {
969 unsigned int i, cpu;
970
971 local_bh_disable();
972 cpu = smp_processor_id();
973 i = 0;
974 IP6T_ENTRY_ITERATE(t->entries[cpu],
975 t->size,
976 add_counter_to_entry,
977 counters,
978 &i);
979 local_bh_enable();
980 }
981
982 static inline int
983 zero_entry_counter(struct ip6t_entry *e, void *arg)
984 {
985 e->counters.bcnt = 0;
986 e->counters.pcnt = 0;
987 return 0;
988 }
989
990 static void
991 clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
992 {
993 unsigned int cpu;
994 const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
995
996 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
997 for_each_possible_cpu(cpu) {
998 memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
999 IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
1000 zero_entry_counter, NULL);
1001 }
1002 }
1003
1004 static struct xt_counters *alloc_counters(struct xt_table *table)
1005 {
1006 unsigned int countersize;
1007 struct xt_counters *counters;
1008 struct xt_table_info *private = table->private;
1009 struct xt_table_info *info;
1010
1011 /* We need atomic snapshot of counters: rest doesn't change
1012 (other than comefrom, which userspace doesn't care
1013 about). */
1014 countersize = sizeof(struct xt_counters) * private->number;
1015 counters = vmalloc_node(countersize, numa_node_id());
1016
1017 if (counters == NULL)
1018 goto nomem;
1019
1020 info = xt_alloc_table_info(private->size);
1021 if (!info)
1022 goto free_counters;
1023
1024 clone_counters(info, private);
1025
1026 mutex_lock(&table->lock);
1027 xt_table_entry_swap_rcu(private, info);
1028 synchronize_net(); /* Wait until smoke has cleared */
1029
1030 get_counters(info, counters);
1031 put_counters(private, counters);
1032 mutex_unlock(&table->lock);
1033
1034 xt_free_table_info(info);
1035
1036 return counters;
1037
1038 free_counters:
1039 vfree(counters);
1040 nomem:
1041 return ERR_PTR(-ENOMEM);
1042 }
1043
1044 static int
1045 copy_entries_to_user(unsigned int total_size,
1046 struct xt_table *table,
1047 void __user *userptr)
1048 {
1049 unsigned int off, num;
1050 struct ip6t_entry *e;
1051 struct xt_counters *counters;
1052 const struct xt_table_info *private = table->private;
1053 int ret = 0;
1054 const void *loc_cpu_entry;
1055
1056 counters = alloc_counters(table);
1057 if (IS_ERR(counters))
1058 return PTR_ERR(counters);
1059
1060 /* choose the copy that is on our node/cpu, ...
1061 * This choice is lazy (because current thread is
1062 * allowed to migrate to another cpu)
1063 */
1064 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1065 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1066 ret = -EFAULT;
1067 goto free_counters;
1068 }
1069
1070 /* FIXME: use iterator macros --RR */
1071 /* ... then go back and fix counters and names */
1072 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1073 unsigned int i;
1074 const struct ip6t_entry_match *m;
1075 const struct ip6t_entry_target *t;
1076
1077 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1078 if (copy_to_user(userptr + off
1079 + offsetof(struct ip6t_entry, counters),
1080 &counters[num],
1081 sizeof(counters[num])) != 0) {
1082 ret = -EFAULT;
1083 goto free_counters;
1084 }
1085
1086 for (i = sizeof(struct ip6t_entry);
1087 i < e->target_offset;
1088 i += m->u.match_size) {
1089 m = (void *)e + i;
1090
1091 if (copy_to_user(userptr + off + i
1092 + offsetof(struct ip6t_entry_match,
1093 u.user.name),
1094 m->u.kernel.match->name,
1095 strlen(m->u.kernel.match->name)+1)
1096 != 0) {
1097 ret = -EFAULT;
1098 goto free_counters;
1099 }
1100 }
1101
1102 t = ip6t_get_target(e);
1103 if (copy_to_user(userptr + off + e->target_offset
1104 + offsetof(struct ip6t_entry_target,
1105 u.user.name),
1106 t->u.kernel.target->name,
1107 strlen(t->u.kernel.target->name)+1) != 0) {
1108 ret = -EFAULT;
1109 goto free_counters;
1110 }
1111 }
1112
1113 free_counters:
1114 vfree(counters);
1115 return ret;
1116 }
1117
1118 #ifdef CONFIG_COMPAT
1119 static void compat_standard_from_user(void *dst, void *src)
1120 {
1121 int v = *(compat_int_t *)src;
1122
1123 if (v > 0)
1124 v += xt_compat_calc_jump(AF_INET6, v);
1125 memcpy(dst, &v, sizeof(v));
1126 }
1127
1128 static int compat_standard_to_user(void __user *dst, void *src)
1129 {
1130 compat_int_t cv = *(int *)src;
1131
1132 if (cv > 0)
1133 cv -= xt_compat_calc_jump(AF_INET6, cv);
1134 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1135 }
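/*
 * The two helpers above are symmetric.  A positive standard verdict is a
 * jump offset into the rule blob, and that offset differs between the
 * native and compat layouts, so xt_compat_calc_jump() supplies the
 * accumulated size delta: added when importing a 32-bit blob, subtracted
 * again on export.  Verdicts <= 0 (the encoded NF_* values and
 * IP6T_RETURN) pass through unchanged.
 */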
1136
1137 static inline int
1138 compat_calc_match(struct ip6t_entry_match *m, int *size)
1139 {
1140 *size += xt_compat_match_offset(m->u.kernel.match);
1141 return 0;
1142 }
1143
1144 static int compat_calc_entry(struct ip6t_entry *e,
1145 const struct xt_table_info *info,
1146 void *base, struct xt_table_info *newinfo)
1147 {
1148 struct ip6t_entry_target *t;
1149 unsigned int entry_offset;
1150 int off, i, ret;
1151
1152 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1153 entry_offset = (void *)e - base;
1154 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1155 t = ip6t_get_target(e);
1156 off += xt_compat_target_offset(t->u.kernel.target);
1157 newinfo->size -= off;
1158 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1159 if (ret)
1160 return ret;
1161
1162 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1163 if (info->hook_entry[i] &&
1164 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1165 newinfo->hook_entry[i] -= off;
1166 if (info->underflow[i] &&
1167 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1168 newinfo->underflow[i] -= off;
1169 }
1170 return 0;
1171 }
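/*
 * Example of the delta tracked above: struct ip6t_entry is larger than
 * struct compat_ip6t_entry, so 'off' starts at that difference and grows
 * by each match's and the target's xt_compat_*_offset(); hook entries and
 * underflows that lie beyond this entry in the blob are pulled back by
 * the same amount.
 */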
1172
1173 static int compat_table_info(const struct xt_table_info *info,
1174 struct xt_table_info *newinfo)
1175 {
1176 void *loc_cpu_entry;
1177
1178 if (!newinfo || !info)
1179 return -EINVAL;
1180
1181 	/* we don't care about newinfo->entries[] */
1182 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1183 newinfo->initial_entries = 0;
1184 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1185 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1186 compat_calc_entry, info, loc_cpu_entry,
1187 newinfo);
1188 }
1189 #endif
1190
1191 static int get_info(struct net *net, void __user *user, int *len, int compat)
1192 {
1193 char name[IP6T_TABLE_MAXNAMELEN];
1194 struct xt_table *t;
1195 int ret;
1196
1197 if (*len != sizeof(struct ip6t_getinfo)) {
1198 duprintf("length %u != %zu\n", *len,
1199 sizeof(struct ip6t_getinfo));
1200 return -EINVAL;
1201 }
1202
1203 if (copy_from_user(name, user, sizeof(name)) != 0)
1204 return -EFAULT;
1205
1206 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1207 #ifdef CONFIG_COMPAT
1208 if (compat)
1209 xt_compat_lock(AF_INET6);
1210 #endif
1211 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1212 "ip6table_%s", name);
1213 if (t && !IS_ERR(t)) {
1214 struct ip6t_getinfo info;
1215 const struct xt_table_info *private = t->private;
1216
1217 #ifdef CONFIG_COMPAT
1218 if (compat) {
1219 struct xt_table_info tmp;
1220 ret = compat_table_info(private, &tmp);
1221 xt_compat_flush_offsets(AF_INET6);
1222 private = &tmp;
1223 }
1224 #endif
1225 info.valid_hooks = t->valid_hooks;
1226 memcpy(info.hook_entry, private->hook_entry,
1227 sizeof(info.hook_entry));
1228 memcpy(info.underflow, private->underflow,
1229 sizeof(info.underflow));
1230 info.num_entries = private->number;
1231 info.size = private->size;
1232 strcpy(info.name, name);
1233
1234 if (copy_to_user(user, &info, *len) != 0)
1235 ret = -EFAULT;
1236 else
1237 ret = 0;
1238
1239 xt_table_unlock(t);
1240 module_put(t->me);
1241 } else
1242 ret = t ? PTR_ERR(t) : -ENOENT;
1243 #ifdef CONFIG_COMPAT
1244 if (compat)
1245 xt_compat_unlock(AF_INET6);
1246 #endif
1247 return ret;
1248 }
1249
1250 static int
1251 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1252 {
1253 int ret;
1254 struct ip6t_get_entries get;
1255 struct xt_table *t;
1256
1257 if (*len < sizeof(get)) {
1258 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1259 return -EINVAL;
1260 }
1261 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1262 return -EFAULT;
1263 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1264 duprintf("get_entries: %u != %zu\n",
1265 *len, sizeof(get) + get.size);
1266 return -EINVAL;
1267 }
1268
1269 t = xt_find_table_lock(net, AF_INET6, get.name);
1270 if (t && !IS_ERR(t)) {
1271 struct xt_table_info *private = t->private;
1272 duprintf("t->private->number = %u\n", private->number);
1273 if (get.size == private->size)
1274 ret = copy_entries_to_user(private->size,
1275 t, uptr->entrytable);
1276 else {
1277 duprintf("get_entries: I've got %u not %u!\n",
1278 private->size, get.size);
1279 ret = -EAGAIN;
1280 }
1281 module_put(t->me);
1282 xt_table_unlock(t);
1283 } else
1284 ret = t ? PTR_ERR(t) : -ENOENT;
1285
1286 return ret;
1287 }
1288
1289 static int
1290 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1291 struct xt_table_info *newinfo, unsigned int num_counters,
1292 void __user *counters_ptr)
1293 {
1294 int ret;
1295 struct xt_table *t;
1296 struct xt_table_info *oldinfo;
1297 struct xt_counters *counters;
1298 const void *loc_cpu_old_entry;
1299
1300 ret = 0;
1301 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1302 numa_node_id());
1303 if (!counters) {
1304 ret = -ENOMEM;
1305 goto out;
1306 }
1307
1308 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1309 "ip6table_%s", name);
1310 if (!t || IS_ERR(t)) {
1311 ret = t ? PTR_ERR(t) : -ENOENT;
1312 goto free_newinfo_counters_untrans;
1313 }
1314
1315 /* You lied! */
1316 if (valid_hooks != t->valid_hooks) {
1317 duprintf("Valid hook crap: %08X vs %08X\n",
1318 valid_hooks, t->valid_hooks);
1319 ret = -EINVAL;
1320 goto put_module;
1321 }
1322
1323 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1324 if (!oldinfo)
1325 goto put_module;
1326
1327 /* Update module usage count based on number of rules */
1328 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1329 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1330 if ((oldinfo->number > oldinfo->initial_entries) ||
1331 (newinfo->number <= oldinfo->initial_entries))
1332 module_put(t->me);
1333 if ((oldinfo->number > oldinfo->initial_entries) &&
1334 (newinfo->number <= oldinfo->initial_entries))
1335 module_put(t->me);
1336
1337 /* Get the old counters. */
1338 get_counters(oldinfo, counters);
1339 /* Decrease module usage counts and free resource */
1340 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1341 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1342 NULL);
1343 xt_free_table_info(oldinfo);
1344 if (copy_to_user(counters_ptr, counters,
1345 sizeof(struct xt_counters) * num_counters) != 0)
1346 ret = -EFAULT;
1347 vfree(counters);
1348 xt_table_unlock(t);
1349 return ret;
1350
1351 put_module:
1352 module_put(t->me);
1353 xt_table_unlock(t);
1354 free_newinfo_counters_untrans:
1355 vfree(counters);
1356 out:
1357 return ret;
1358 }
1359
1360 static int
1361 do_replace(struct net *net, void __user *user, unsigned int len)
1362 {
1363 int ret;
1364 struct ip6t_replace tmp;
1365 struct xt_table_info *newinfo;
1366 void *loc_cpu_entry;
1367
1368 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1369 return -EFAULT;
1370
1371 /* overflow check */
1372 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1373 return -ENOMEM;
1374
1375 newinfo = xt_alloc_table_info(tmp.size);
1376 if (!newinfo)
1377 return -ENOMEM;
1378
1379 /* choose the copy that is on our node/cpu */
1380 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1381 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1382 tmp.size) != 0) {
1383 ret = -EFAULT;
1384 goto free_newinfo;
1385 }
1386
1387 ret = translate_table(tmp.name, tmp.valid_hooks,
1388 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1389 tmp.hook_entry, tmp.underflow);
1390 if (ret != 0)
1391 goto free_newinfo;
1392
1393 duprintf("ip_tables: Translated table\n");
1394
1395 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1396 tmp.num_counters, tmp.counters);
1397 if (ret)
1398 goto free_newinfo_untrans;
1399 return 0;
1400
1401 free_newinfo_untrans:
1402 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1403 free_newinfo:
1404 xt_free_table_info(newinfo);
1405 return ret;
1406 }
1407
1408 static int
1409 do_add_counters(struct net *net, void __user *user, unsigned int len,
1410 int compat)
1411 {
1412 unsigned int i;
1413 struct xt_counters_info tmp;
1414 struct xt_counters *paddc;
1415 unsigned int num_counters;
1416 char *name;
1417 int size;
1418 void *ptmp;
1419 struct xt_table *t;
1420 const struct xt_table_info *private;
1421 int ret = 0;
1422 const void *loc_cpu_entry;
1423 #ifdef CONFIG_COMPAT
1424 struct compat_xt_counters_info compat_tmp;
1425
1426 if (compat) {
1427 ptmp = &compat_tmp;
1428 size = sizeof(struct compat_xt_counters_info);
1429 } else
1430 #endif
1431 {
1432 ptmp = &tmp;
1433 size = sizeof(struct xt_counters_info);
1434 }
1435
1436 if (copy_from_user(ptmp, user, size) != 0)
1437 return -EFAULT;
1438
1439 #ifdef CONFIG_COMPAT
1440 if (compat) {
1441 num_counters = compat_tmp.num_counters;
1442 name = compat_tmp.name;
1443 } else
1444 #endif
1445 {
1446 num_counters = tmp.num_counters;
1447 name = tmp.name;
1448 }
1449
1450 if (len != size + num_counters * sizeof(struct xt_counters))
1451 return -EINVAL;
1452
1453 paddc = vmalloc_node(len - size, numa_node_id());
1454 if (!paddc)
1455 return -ENOMEM;
1456
1457 if (copy_from_user(paddc, user + size, len - size) != 0) {
1458 ret = -EFAULT;
1459 goto free;
1460 }
1461
1462 t = xt_find_table_lock(net, AF_INET6, name);
1463 if (!t || IS_ERR(t)) {
1464 ret = t ? PTR_ERR(t) : -ENOENT;
1465 goto free;
1466 }
1467
1468 mutex_lock(&t->lock);
1469 private = t->private;
1470 if (private->number != num_counters) {
1471 ret = -EINVAL;
1472 goto unlock_up_free;
1473 }
1474
1475 preempt_disable();
1476 i = 0;
1477 /* Choose the copy that is on our node */
1478 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1479 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1480 private->size,
1481 add_counter_to_entry,
1482 paddc,
1483 &i);
1484 preempt_enable();
1485 unlock_up_free:
1486 mutex_unlock(&t->lock);
1487 xt_table_unlock(t);
1488 module_put(t->me);
1489 free:
1490 vfree(paddc);
1491
1492 return ret;
1493 }
1494
1495 #ifdef CONFIG_COMPAT
1496 struct compat_ip6t_replace {
1497 char name[IP6T_TABLE_MAXNAMELEN];
1498 u32 valid_hooks;
1499 u32 num_entries;
1500 u32 size;
1501 u32 hook_entry[NF_INET_NUMHOOKS];
1502 u32 underflow[NF_INET_NUMHOOKS];
1503 u32 num_counters;
1504 compat_uptr_t counters; /* struct ip6t_counters * */
1505 struct compat_ip6t_entry entries[0];
1506 };
1507
1508 static int
1509 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1510 unsigned int *size, struct xt_counters *counters,
1511 unsigned int *i)
1512 {
1513 struct ip6t_entry_target *t;
1514 struct compat_ip6t_entry __user *ce;
1515 u_int16_t target_offset, next_offset;
1516 compat_uint_t origsize;
1517 int ret;
1518
1519 ret = -EFAULT;
1520 origsize = *size;
1521 ce = (struct compat_ip6t_entry __user *)*dstptr;
1522 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1523 goto out;
1524
1525 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1526 goto out;
1527
1528 *dstptr += sizeof(struct compat_ip6t_entry);
1529 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1530
1531 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1532 target_offset = e->target_offset - (origsize - *size);
1533 if (ret)
1534 goto out;
1535 t = ip6t_get_target(e);
1536 ret = xt_compat_target_to_user(t, dstptr, size);
1537 if (ret)
1538 goto out;
1539 ret = -EFAULT;
1540 next_offset = e->next_offset - (origsize - *size);
1541 if (put_user(target_offset, &ce->target_offset))
1542 goto out;
1543 if (put_user(next_offset, &ce->next_offset))
1544 goto out;
1545
1546 (*i)++;
1547 return 0;
1548 out:
1549 return ret;
1550 }
1551
1552 static int
1553 compat_find_calc_match(struct ip6t_entry_match *m,
1554 const char *name,
1555 const struct ip6t_ip6 *ipv6,
1556 unsigned int hookmask,
1557 int *size, unsigned int *i)
1558 {
1559 struct xt_match *match;
1560
1561 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1562 m->u.user.revision),
1563 "ip6t_%s", m->u.user.name);
1564 if (IS_ERR(match) || !match) {
1565 duprintf("compat_check_calc_match: `%s' not found\n",
1566 m->u.user.name);
1567 return match ? PTR_ERR(match) : -ENOENT;
1568 }
1569 m->u.kernel.match = match;
1570 *size += xt_compat_match_offset(match);
1571
1572 (*i)++;
1573 return 0;
1574 }
1575
1576 static int
1577 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1578 {
1579 if (i && (*i)-- == 0)
1580 return 1;
1581
1582 module_put(m->u.kernel.match->me);
1583 return 0;
1584 }
1585
1586 static int
1587 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1588 {
1589 struct ip6t_entry_target *t;
1590
1591 if (i && (*i)-- == 0)
1592 return 1;
1593
1594 /* Cleanup all matches */
1595 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1596 t = compat_ip6t_get_target(e);
1597 module_put(t->u.kernel.target->me);
1598 return 0;
1599 }
1600
1601 static int
1602 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1603 struct xt_table_info *newinfo,
1604 unsigned int *size,
1605 unsigned char *base,
1606 unsigned char *limit,
1607 unsigned int *hook_entries,
1608 unsigned int *underflows,
1609 unsigned int *i,
1610 const char *name)
1611 {
1612 struct ip6t_entry_target *t;
1613 struct xt_target *target;
1614 unsigned int entry_offset;
1615 unsigned int j;
1616 int ret, off, h;
1617
1618 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1619 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1620 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1621 duprintf("Bad offset %p, limit = %p\n", e, limit);
1622 return -EINVAL;
1623 }
1624
1625 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1626 sizeof(struct compat_xt_entry_target)) {
1627 duprintf("checking: element %p size %u\n",
1628 e, e->next_offset);
1629 return -EINVAL;
1630 }
1631
1632 /* For purposes of check_entry casting the compat entry is fine */
1633 ret = check_entry((struct ip6t_entry *)e, name);
1634 if (ret)
1635 return ret;
1636
1637 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1638 entry_offset = (void *)e - (void *)base;
1639 j = 0;
1640 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1641 &e->ipv6, e->comefrom, &off, &j);
1642 if (ret != 0)
1643 goto release_matches;
1644
1645 t = compat_ip6t_get_target(e);
1646 target = try_then_request_module(xt_find_target(AF_INET6,
1647 t->u.user.name,
1648 t->u.user.revision),
1649 "ip6t_%s", t->u.user.name);
1650 if (IS_ERR(target) || !target) {
1651 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1652 t->u.user.name);
1653 ret = target ? PTR_ERR(target) : -ENOENT;
1654 goto release_matches;
1655 }
1656 t->u.kernel.target = target;
1657
1658 off += xt_compat_target_offset(target);
1659 *size += off;
1660 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1661 if (ret)
1662 goto out;
1663
1664 /* Check hooks & underflows */
1665 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1666 if ((unsigned char *)e - base == hook_entries[h])
1667 newinfo->hook_entry[h] = hook_entries[h];
1668 if ((unsigned char *)e - base == underflows[h])
1669 newinfo->underflow[h] = underflows[h];
1670 }
1671
1672 /* Clear counters and comefrom */
1673 memset(&e->counters, 0, sizeof(e->counters));
1674 e->comefrom = 0;
1675
1676 (*i)++;
1677 return 0;
1678
1679 out:
1680 module_put(t->u.kernel.target->me);
1681 release_matches:
1682 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
1683 return ret;
1684 }
1685
1686 static int
1687 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1688 unsigned int *size, const char *name,
1689 struct xt_table_info *newinfo, unsigned char *base)
1690 {
1691 struct ip6t_entry_target *t;
1692 struct xt_target *target;
1693 struct ip6t_entry *de;
1694 unsigned int origsize;
1695 int ret, h;
1696
1697 ret = 0;
1698 origsize = *size;
1699 de = (struct ip6t_entry *)*dstptr;
1700 memcpy(de, e, sizeof(struct ip6t_entry));
1701 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1702
1703 *dstptr += sizeof(struct ip6t_entry);
1704 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1705
1706 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1707 dstptr, size);
1708 if (ret)
1709 return ret;
1710 de->target_offset = e->target_offset - (origsize - *size);
1711 t = compat_ip6t_get_target(e);
1712 target = t->u.kernel.target;
1713 xt_compat_target_from_user(t, dstptr, size);
1714
1715 de->next_offset = e->next_offset - (origsize - *size);
1716 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1717 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1718 newinfo->hook_entry[h] -= origsize - *size;
1719 if ((unsigned char *)de - base < newinfo->underflow[h])
1720 newinfo->underflow[h] -= origsize - *size;
1721 }
1722 return ret;
1723 }
1724
1725 static int compat_check_entry(struct ip6t_entry *e, const char *name,
1726 unsigned int *i)
1727 {
1728 unsigned int j;
1729 int ret;
1730 struct xt_mtchk_param mtpar;
1731
1732 j = 0;
1733 mtpar.table = name;
1734 mtpar.entryinfo = &e->ipv6;
1735 mtpar.hook_mask = e->comefrom;
1736 mtpar.family = NFPROTO_IPV6;
1737 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1738 if (ret)
1739 goto cleanup_matches;
1740
1741 ret = check_target(e, name);
1742 if (ret)
1743 goto cleanup_matches;
1744
1745 (*i)++;
1746 return 0;
1747
1748 cleanup_matches:
1749 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
1750 return ret;
1751 }
1752
1753 static int
1754 translate_compat_table(const char *name,
1755 unsigned int valid_hooks,
1756 struct xt_table_info **pinfo,
1757 void **pentry0,
1758 unsigned int total_size,
1759 unsigned int number,
1760 unsigned int *hook_entries,
1761 unsigned int *underflows)
1762 {
1763 unsigned int i, j;
1764 struct xt_table_info *newinfo, *info;
1765 void *pos, *entry0, *entry1;
1766 unsigned int size;
1767 int ret;
1768
1769 info = *pinfo;
1770 entry0 = *pentry0;
1771 size = total_size;
1772 info->number = number;
1773
1774 /* Init all hooks to impossible value. */
1775 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1776 info->hook_entry[i] = 0xFFFFFFFF;
1777 info->underflow[i] = 0xFFFFFFFF;
1778 }
1779
1780 duprintf("translate_compat_table: size %u\n", info->size);
1781 j = 0;
1782 xt_compat_lock(AF_INET6);
1783 /* Walk through entries, checking offsets. */
1784 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1785 check_compat_entry_size_and_hooks,
1786 info, &size, entry0,
1787 entry0 + total_size,
1788 hook_entries, underflows, &j, name);
1789 if (ret != 0)
1790 goto out_unlock;
1791
1792 ret = -EINVAL;
1793 if (j != number) {
1794 duprintf("translate_compat_table: %u not %u entries\n",
1795 j, number);
1796 goto out_unlock;
1797 }
1798
1799 /* Check hooks all assigned */
1800 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1801 /* Only hooks which are valid */
1802 if (!(valid_hooks & (1 << i)))
1803 continue;
1804 if (info->hook_entry[i] == 0xFFFFFFFF) {
1805 duprintf("Invalid hook entry %u %u\n",
1806 i, hook_entries[i]);
1807 goto out_unlock;
1808 }
1809 if (info->underflow[i] == 0xFFFFFFFF) {
1810 duprintf("Invalid underflow %u %u\n",
1811 i, underflows[i]);
1812 goto out_unlock;
1813 }
1814 }
1815
1816 ret = -ENOMEM;
1817 newinfo = xt_alloc_table_info(size);
1818 if (!newinfo)
1819 goto out_unlock;
1820
1821 newinfo->number = number;
1822 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1823 newinfo->hook_entry[i] = info->hook_entry[i];
1824 newinfo->underflow[i] = info->underflow[i];
1825 }
1826 entry1 = newinfo->entries[raw_smp_processor_id()];
1827 pos = entry1;
1828 size = total_size;
1829 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1830 compat_copy_entry_from_user,
1831 &pos, &size, name, newinfo, entry1);
1832 xt_compat_flush_offsets(AF_INET6);
1833 xt_compat_unlock(AF_INET6);
1834 if (ret)
1835 goto free_newinfo;
1836
1837 ret = -ELOOP;
1838 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1839 goto free_newinfo;
1840
1841 i = 0;
1842 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1843 name, &i);
1844 if (ret) {
1845 j -= i;
1846 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1847 compat_release_entry, &j);
1848 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1849 xt_free_table_info(newinfo);
1850 return ret;
1851 }
1852
1853 /* And one copy for every other CPU */
1854 for_each_possible_cpu(i)
1855 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1856 memcpy(newinfo->entries[i], entry1, newinfo->size);
1857
1858 *pinfo = newinfo;
1859 *pentry0 = entry1;
1860 xt_free_table_info(info);
1861 return 0;
1862
1863 free_newinfo:
1864 xt_free_table_info(newinfo);
1865 out:
1866 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1867 return ret;
1868 out_unlock:
1869 xt_compat_flush_offsets(AF_INET6);
1870 xt_compat_unlock(AF_INET6);
1871 goto out;
1872 }
1873
1874 static int
1875 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1876 {
1877 int ret;
1878 struct compat_ip6t_replace tmp;
1879 struct xt_table_info *newinfo;
1880 void *loc_cpu_entry;
1881
1882 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1883 return -EFAULT;
1884
1885 /* overflow check */
1886 if (tmp.size >= INT_MAX / num_possible_cpus())
1887 return -ENOMEM;
1888 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1889 return -ENOMEM;
1890
1891 newinfo = xt_alloc_table_info(tmp.size);
1892 if (!newinfo)
1893 return -ENOMEM;
1894
1895 /* choose the copy that is on our node/cpu */
1896 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1897 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1898 tmp.size) != 0) {
1899 ret = -EFAULT;
1900 goto free_newinfo;
1901 }
1902
1903 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1904 &newinfo, &loc_cpu_entry, tmp.size,
1905 tmp.num_entries, tmp.hook_entry,
1906 tmp.underflow);
1907 if (ret != 0)
1908 goto free_newinfo;
1909
1910 duprintf("compat_do_replace: Translated table\n");
1911
1912 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1913 tmp.num_counters, compat_ptr(tmp.counters));
1914 if (ret)
1915 goto free_newinfo_untrans;
1916 return 0;
1917
1918 free_newinfo_untrans:
1919 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1920 free_newinfo:
1921 xt_free_table_info(newinfo);
1922 return ret;
1923 }
1924
1925 static int
1926 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1927 unsigned int len)
1928 {
1929 int ret;
1930
1931 if (!capable(CAP_NET_ADMIN))
1932 return -EPERM;
1933
1934 switch (cmd) {
1935 case IP6T_SO_SET_REPLACE:
1936 ret = compat_do_replace(sock_net(sk), user, len);
1937 break;
1938
1939 case IP6T_SO_SET_ADD_COUNTERS:
1940 ret = do_add_counters(sock_net(sk), user, len, 1);
1941 break;
1942
1943 default:
1944 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1945 ret = -EINVAL;
1946 }
1947
1948 return ret;
1949 }
1950
1951 struct compat_ip6t_get_entries {
1952 char name[IP6T_TABLE_MAXNAMELEN];
1953 compat_uint_t size;
1954 struct compat_ip6t_entry entrytable[0];
1955 };
1956
1957 static int
1958 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1959 void __user *userptr)
1960 {
1961 struct xt_counters *counters;
1962 const struct xt_table_info *private = table->private;
1963 void __user *pos;
1964 unsigned int size;
1965 int ret = 0;
1966 const void *loc_cpu_entry;
1967 unsigned int i = 0;
1968
1969 counters = alloc_counters(table);
1970 if (IS_ERR(counters))
1971 return PTR_ERR(counters);
1972
1973 /* choose the copy that is on our node/cpu, ...
1974 * This choice is lazy (because current thread is
1975 * allowed to migrate to another cpu)
1976 */
1977 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1978 pos = userptr;
1979 size = total_size;
1980 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1981 compat_copy_entry_to_user,
1982 &pos, &size, counters, &i);
1983
1984 vfree(counters);
1985 return ret;
1986 }
1987
1988 static int
1989 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1990 int *len)
1991 {
1992 int ret;
1993 struct compat_ip6t_get_entries get;
1994 struct xt_table *t;
1995
1996 if (*len < sizeof(get)) {
1997 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1998 return -EINVAL;
1999 }
2000
2001 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
2002 return -EFAULT;
2003
2004 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
2005 duprintf("compat_get_entries: %u != %zu\n",
2006 *len, sizeof(get) + get.size);
2007 return -EINVAL;
2008 }
2009
2010 xt_compat_lock(AF_INET6);
2011 t = xt_find_table_lock(net, AF_INET6, get.name);
2012 if (t && !IS_ERR(t)) {
2013 const struct xt_table_info *private = t->private;
2014 struct xt_table_info info;
2015 duprintf("t->private->number = %u\n", private->number);
2016 ret = compat_table_info(private, &info);
2017 if (!ret && get.size == info.size) {
2018 ret = compat_copy_entries_to_user(private->size,
2019 t, uptr->entrytable);
2020 } else if (!ret) {
2021 duprintf("compat_get_entries: I've got %u not %u!\n",
2022 private->size, get.size);
2023 ret = -EAGAIN;
2024 }
2025 xt_compat_flush_offsets(AF_INET6);
2026 module_put(t->me);
2027 xt_table_unlock(t);
2028 } else
2029 ret = t ? PTR_ERR(t) : -ENOENT;
2030
2031 xt_compat_unlock(AF_INET6);
2032 return ret;
2033 }
2034
2035 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
2036
2037 static int
2038 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2039 {
2040 int ret;
2041
2042 if (!capable(CAP_NET_ADMIN))
2043 return -EPERM;
2044
2045 switch (cmd) {
2046 case IP6T_SO_GET_INFO:
2047 ret = get_info(sock_net(sk), user, len, 1);
2048 break;
2049 case IP6T_SO_GET_ENTRIES:
2050 ret = compat_get_entries(sock_net(sk), user, len);
2051 break;
2052 default:
2053 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2054 }
2055 return ret;
2056 }
2057 #endif
2058
2059 static int
2060 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2061 {
2062 int ret;
2063
2064 if (!capable(CAP_NET_ADMIN))
2065 return -EPERM;
2066
2067 switch (cmd) {
2068 case IP6T_SO_SET_REPLACE:
2069 ret = do_replace(sock_net(sk), user, len);
2070 break;
2071
2072 case IP6T_SO_SET_ADD_COUNTERS:
2073 ret = do_add_counters(sock_net(sk), user, len, 0);
2074 break;
2075
2076 default:
2077 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2078 ret = -EINVAL;
2079 }
2080
2081 return ret;
2082 }
2083
2084 static int
2085 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2086 {
2087 int ret;
2088
2089 if (!capable(CAP_NET_ADMIN))
2090 return -EPERM;
2091
2092 switch (cmd) {
2093 case IP6T_SO_GET_INFO:
2094 ret = get_info(sock_net(sk), user, len, 0);
2095 break;
2096
2097 case IP6T_SO_GET_ENTRIES:
2098 ret = get_entries(sock_net(sk), user, len);
2099 break;
2100
2101 case IP6T_SO_GET_REVISION_MATCH:
2102 case IP6T_SO_GET_REVISION_TARGET: {
2103 struct ip6t_get_revision rev;
2104 int target;
2105
2106 if (*len != sizeof(rev)) {
2107 ret = -EINVAL;
2108 break;
2109 }
2110 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2111 ret = -EFAULT;
2112 break;
2113 }
2114
2115 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2116 target = 1;
2117 else
2118 target = 0;
2119
2120 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2121 rev.revision,
2122 target, &ret),
2123 "ip6t_%s", rev.name);
2124 break;
2125 }
2126
2127 default:
2128 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2129 ret = -EINVAL;
2130 }
2131
2132 return ret;
2133 }
2134
2135 struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
2136 const struct ip6t_replace *repl)
2137 {
2138 int ret;
2139 struct xt_table_info *newinfo;
2140 struct xt_table_info bootstrap
2141 = { 0, 0, 0, { 0 }, { 0 }, { } };
2142 void *loc_cpu_entry;
2143 struct xt_table *new_table;
2144
2145 newinfo = xt_alloc_table_info(repl->size);
2146 if (!newinfo) {
2147 ret = -ENOMEM;
2148 goto out;
2149 }
2150
2151 	/* choose the copy on our node/cpu, but don't care about preemption */
2152 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2153 memcpy(loc_cpu_entry, repl->entries, repl->size);
2154
2155 ret = translate_table(table->name, table->valid_hooks,
2156 newinfo, loc_cpu_entry, repl->size,
2157 repl->num_entries,
2158 repl->hook_entry,
2159 repl->underflow);
2160 if (ret != 0)
2161 goto out_free;
2162
2163 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2164 if (IS_ERR(new_table)) {
2165 ret = PTR_ERR(new_table);
2166 goto out_free;
2167 }
2168 return new_table;
2169
2170 out_free:
2171 xt_free_table_info(newinfo);
2172 out:
2173 return ERR_PTR(ret);
2174 }
2175
2176 void ip6t_unregister_table(struct xt_table *table)
2177 {
2178 struct xt_table_info *private;
2179 void *loc_cpu_entry;
2180 struct module *table_owner = table->me;
2181
2182 private = xt_unregister_table(table);
2183
2184 /* Decrease module usage counts and free resources */
2185 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2186 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2187 if (private->number > private->initial_entries)
2188 module_put(table_owner);
2189 xt_free_table_info(private);
2190 }
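/*
 * Illustrative sketch (not part of this file) of how a table module such
 * as ip6table_filter might drive the two helpers above from its pernet
 * init/exit hooks. example_table, example_initial_repl and
 * example_net_table are placeholder names, not real symbols; a real module
 * would keep the returned xt_table pointer in per-namespace state.
 */
#if 0
static struct xt_table *example_net_table;

static int __net_init example_net_init(struct net *net)
{
	struct xt_table *t;

	t = ip6t_register_table(net, &example_table, &example_initial_repl);
	if (IS_ERR(t))
		return PTR_ERR(t);
	example_net_table = t;		/* stash for the exit path */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	ip6t_unregister_table(example_net_table);
}
#endif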
2191
2192 /* Returns true if the type and code are matched by the range, false otherwise */
2193 static inline bool
2194 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2195 u_int8_t type, u_int8_t code,
2196 bool invert)
2197 {
2198 return (type == test_type && code >= min_code && code <= max_code)
2199 ^ invert;
2200 }
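/* Worked example: with test_type 128 (Echo Request), min_code 0 and
 * max_code 0xff, any Echo Request matches; setting invert (IP6T_ICMP_INV)
 * flips the XOR above so every ICMPv6 packet *except* Echo Requests
 * matches instead.
 */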
2201
2202 static bool
2203 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2204 {
2205 const struct icmp6hdr *ic;
2206 struct icmp6hdr _icmph;
2207 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2208
2209 /* Must not be a fragment. */
2210 if (par->fragoff != 0)
2211 return false;
2212
2213 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2214 if (ic == NULL) {
2215 /* We've been asked to examine this packet, and we
2216 * can't. Hence, no choice but to drop.
2217 */
2218 duprintf("Dropping evil ICMP tinygram.\n");
2219 *par->hotdrop = true;
2220 return false;
2221 }
2222
2223 return icmp6_type_code_match(icmpinfo->type,
2224 icmpinfo->code[0],
2225 icmpinfo->code[1],
2226 ic->icmp6_type, ic->icmp6_code,
2227 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2228 }
2229
2230 /* Called when user tries to insert an entry of this type. */
2231 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2232 {
2233 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2234
2235 /* Must specify no unknown invflags */
2236 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2237 }
2238
2239 /* The built-in targets: standard (NULL) and error. */
2240 static struct xt_target ip6t_standard_target __read_mostly = {
2241 .name = IP6T_STANDARD_TARGET,
2242 .targetsize = sizeof(int),
2243 .family = AF_INET6,
2244 #ifdef CONFIG_COMPAT
2245 .compatsize = sizeof(compat_int_t),
2246 .compat_from_user = compat_standard_from_user,
2247 .compat_to_user = compat_standard_to_user,
2248 #endif
2249 };
2250
2251 static struct xt_target ip6t_error_target __read_mostly = {
2252 .name = IP6T_ERROR_TARGET,
2253 .target = ip6t_error,
2254 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2255 .family = AF_INET6,
2256 };
2257
2258 static struct nf_sockopt_ops ip6t_sockopts = {
2259 .pf = PF_INET6,
2260 .set_optmin = IP6T_BASE_CTL,
2261 .set_optmax = IP6T_SO_SET_MAX+1,
2262 .set = do_ip6t_set_ctl,
2263 #ifdef CONFIG_COMPAT
2264 .compat_set = compat_do_ip6t_set_ctl,
2265 #endif
2266 .get_optmin = IP6T_BASE_CTL,
2267 .get_optmax = IP6T_SO_GET_MAX+1,
2268 .get = do_ip6t_get_ctl,
2269 #ifdef CONFIG_COMPAT
2270 .compat_get = compat_do_ip6t_get_ctl,
2271 #endif
2272 .owner = THIS_MODULE,
2273 };
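/*
 * Hedged sketch of how userspace (e.g. libip6tc) is generally expected to
 * reach the handlers wired up above: a raw IPv6 socket plus getsockopt()
 * at level IPPROTO_IPV6 with an IP6T_SO_* option. This is illustrative
 * userspace code, not part of this module; example_get_filter_info is a
 * made-up helper name.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>
#include <unistd.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

static int example_get_filter_info(struct ip6t_getinfo *info)
{
	socklen_t len = sizeof(*info);
	int ret, fd = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW);

	if (fd < 0)
		return -1;
	memset(info, 0, sizeof(*info));
	strcpy(info->name, "filter");
	/* Ends up in do_ip6t_get_ctl() -> get_info() via nf_sockopt. */
	ret = getsockopt(fd, IPPROTO_IPV6, IP6T_SO_GET_INFO, info, &len);
	close(fd);
	return ret;
}
#endif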
2274
2275 static struct xt_match icmp6_matchstruct __read_mostly = {
2276 .name = "icmp6",
2277 .match = icmp6_match,
2278 .matchsize = sizeof(struct ip6t_icmp),
2279 .checkentry = icmp6_checkentry,
2280 .proto = IPPROTO_ICMPV6,
2281 .family = AF_INET6,
2282 };
2283
2284 static int __net_init ip6_tables_net_init(struct net *net)
2285 {
2286 return xt_proto_init(net, AF_INET6);
2287 }
2288
2289 static void __net_exit ip6_tables_net_exit(struct net *net)
2290 {
2291 xt_proto_fini(net, AF_INET6);
2292 }
2293
2294 static struct pernet_operations ip6_tables_net_ops = {
2295 .init = ip6_tables_net_init,
2296 .exit = ip6_tables_net_exit,
2297 };
2298
2299 static int __init ip6_tables_init(void)
2300 {
2301 int ret;
2302
2303 ret = register_pernet_subsys(&ip6_tables_net_ops);
2304 if (ret < 0)
2305 goto err1;
2306
2307 	/* No one else will be downing the sem now, so we won't sleep */
2308 ret = xt_register_target(&ip6t_standard_target);
2309 if (ret < 0)
2310 goto err2;
2311 ret = xt_register_target(&ip6t_error_target);
2312 if (ret < 0)
2313 goto err3;
2314 ret = xt_register_match(&icmp6_matchstruct);
2315 if (ret < 0)
2316 goto err4;
2317
2318 	/* Register the set/get sockopt handlers */
2319 ret = nf_register_sockopt(&ip6t_sockopts);
2320 if (ret < 0)
2321 goto err5;
2322
2323 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
2324 return 0;
2325
2326 err5:
2327 xt_unregister_match(&icmp6_matchstruct);
2328 err4:
2329 xt_unregister_target(&ip6t_error_target);
2330 err3:
2331 xt_unregister_target(&ip6t_standard_target);
2332 err2:
2333 unregister_pernet_subsys(&ip6_tables_net_ops);
2334 err1:
2335 return ret;
2336 }
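/*
 * The error labels above unwind the registrations in the reverse order in
 * which they were made, so a failure at any step leaves no target, match,
 * sockopt or pernet state behind; ip6_tables_fini() below tears everything
 * down in the same reverse order.
 */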
2337
2338 static void __exit ip6_tables_fini(void)
2339 {
2340 nf_unregister_sockopt(&ip6t_sockopts);
2341
2342 xt_unregister_match(&icmp6_matchstruct);
2343 xt_unregister_target(&ip6t_error_target);
2344 xt_unregister_target(&ip6t_standard_target);
2345
2346 unregister_pernet_subsys(&ip6_tables_net_ops);
2347 }
2348
2349 /*
2350 * find the offset to the specified header or the protocol number of the
2351 * last header if target < 0. "last header" is the transport protocol
2352 * header, ESP, or "No next header".
2353 *
2354 * If the target header is found, its offset is set in *offset and the
2355 * protocol number is returned. Otherwise a negative errno is returned
2356 * (-ENOENT if the header is absent, -EBADMSG if the packet is malformed).
2357 *
2358 * If the first fragment doesn't contain the final protocol header or
2359 * NEXTHDR_NONE it is considered invalid.
2360 *
2361 * Note that a non-first fragment is a special case in which "the protocol
2362 * number of the last header" is the "next header" field in the Fragment
2363 * header. In that case *offset is meaningless and the fragment offset is
2364 * stored in *fragoff if fragoff isn't NULL.
2365 */
2366 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2367 int target, unsigned short *fragoff)
2368 {
2369 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2370 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2371 unsigned int len = skb->len - start;
2372
2373 if (fragoff)
2374 *fragoff = 0;
2375
2376 while (nexthdr != target) {
2377 struct ipv6_opt_hdr _hdr, *hp;
2378 unsigned int hdrlen;
2379
2380 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2381 if (target < 0)
2382 break;
2383 return -ENOENT;
2384 }
2385
2386 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2387 if (hp == NULL)
2388 return -EBADMSG;
2389 if (nexthdr == NEXTHDR_FRAGMENT) {
2390 unsigned short _frag_off;
2391 __be16 *fp;
2392 fp = skb_header_pointer(skb,
2393 start+offsetof(struct frag_hdr,
2394 frag_off),
2395 sizeof(_frag_off),
2396 &_frag_off);
2397 if (fp == NULL)
2398 return -EBADMSG;
2399
2400 _frag_off = ntohs(*fp) & ~0x7;
2401 if (_frag_off) {
2402 if (target < 0 &&
2403 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2404 hp->nexthdr == NEXTHDR_NONE)) {
2405 if (fragoff)
2406 *fragoff = _frag_off;
2407 return hp->nexthdr;
2408 }
2409 return -ENOENT;
2410 }
2411 hdrlen = 8;
2412 } else if (nexthdr == NEXTHDR_AUTH)
2413 hdrlen = (hp->hdrlen + 2) << 2;
2414 else
2415 hdrlen = ipv6_optlen(hp);
2416
2417 nexthdr = hp->nexthdr;
2418 len -= hdrlen;
2419 start += hdrlen;
2420 }
2421
2422 *offset = start;
2423 return nexthdr;
2424 }
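/*
 * Illustrative (hypothetical) caller showing how the return value and
 * *offset are typically consumed; example_locate_tcp is not a symbol in
 * this file.
 */
#if 0
static int example_locate_tcp(const struct sk_buff *skb, unsigned int *thoff)
{
	int proto = ipv6_find_hdr(skb, thoff, IPPROTO_TCP, NULL);

	if (proto < 0)	/* -ENOENT: no TCP header, -EBADMSG: malformed packet */
		return proto;
	/* proto == IPPROTO_TCP and *thoff is the offset of the TCP header. */
	return 0;
}
#endif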
2425
2426 EXPORT_SYMBOL(ip6t_register_table);
2427 EXPORT_SYMBOL(ip6t_unregister_table);
2428 EXPORT_SYMBOL(ip6t_do_table);
2429 EXPORT_SYMBOL(ip6t_ext_hdr);
2430 EXPORT_SYMBOL(ipv6_find_hdr);
2431
2432 module_init(ip6_tables_init);
2433 module_exit(ip6_tables_fini);