/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/capability.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/poison.h>
21 #include <linux/icmpv6.h>
23 #include <net/compat.h>
24 #include <asm/uaccess.h>
25 #include <linux/mutex.h>
26 #include <linux/proc_fs.h>
27 #include <linux/err.h>
28 #include <linux/cpumask.h>
30 #include <linux/netfilter_ipv6/ip6_tables.h>
31 #include <linux/netfilter/x_tables.h>
32 #include <net/netfilter/nf_log.h>
33 #include "../../netfilter/xt_repldata.h"
35 MODULE_LICENSE("GPL");
36 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
37 MODULE_DESCRIPTION("IPv6 packet filter");
39 /*#define DEBUG_IP_FIREWALL*/
40 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
41 /*#define DEBUG_IP_FIREWALL_USER*/
43 #ifdef DEBUG_IP_FIREWALL
44 #define dprintf(format, args...) pr_info(format , ## args)
46 #define dprintf(format, args...)
49 #ifdef DEBUG_IP_FIREWALL_USER
50 #define duprintf(format, args...) pr_info(format , ## args)
52 #define duprintf(format, args...)
55 #ifdef CONFIG_NETFILTER_DEBUG
56 #define IP_NF_ASSERT(x) WARN_ON(!(x))
58 #define IP_NF_ASSERT(x)
62 /* All the better to debug you with... */
67 void *ip6t_alloc_initial_table(const struct xt_table
*info
)
69 return xt_alloc_initial_table(ip6t
, IP6T
);
71 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table
);
/* We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below. */
82 /* Returns whether matches rule or not. */
83 /* Performance critical - called for every packet */
85 ip6_packet_match(const struct sk_buff
*skb
,
88 const struct ip6t_ip6
*ip6info
,
89 unsigned int *protoff
,
90 int *fragoff
, bool *hotdrop
)
93 const struct ipv6hdr
*ipv6
= ipv6_hdr(skb
);
95 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
97 if (FWINV(ipv6_masked_addr_cmp(&ipv6
->saddr
, &ip6info
->smsk
,
98 &ip6info
->src
), IP6T_INV_SRCIP
) ||
99 FWINV(ipv6_masked_addr_cmp(&ipv6
->daddr
, &ip6info
->dmsk
,
100 &ip6info
->dst
), IP6T_INV_DSTIP
)) {
101 dprintf("Source or dest mismatch.\n");
		/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
112 ret
= ifname_compare_aligned(indev
, ip6info
->iniface
, ip6info
->iniface_mask
);
114 if (FWINV(ret
!= 0, IP6T_INV_VIA_IN
)) {
115 dprintf("VIA in mismatch (%s vs %s).%s\n",
116 indev
, ip6info
->iniface
,
117 ip6info
->invflags
&IP6T_INV_VIA_IN
?" (INV)":"");
121 ret
= ifname_compare_aligned(outdev
, ip6info
->outiface
, ip6info
->outiface_mask
);
123 if (FWINV(ret
!= 0, IP6T_INV_VIA_OUT
)) {
124 dprintf("VIA out mismatch (%s vs %s).%s\n",
125 outdev
, ip6info
->outiface
,
126 ip6info
->invflags
&IP6T_INV_VIA_OUT
?" (INV)":"");
130 /* ... might want to do something with class and flowlabel here ... */
132 /* look for the desired protocol header */
133 if((ip6info
->flags
& IP6T_F_PROTO
)) {
135 unsigned short _frag_off
;
137 protohdr
= ipv6_find_hdr(skb
, protoff
, -1, &_frag_off
, NULL
);
143 *fragoff
= _frag_off
;
145 dprintf("Packet protocol %hi ?= %s%hi.\n",
147 ip6info
->invflags
& IP6T_INV_PROTO
? "!":"",
150 if (ip6info
->proto
== protohdr
) {
151 if(ip6info
->invflags
& IP6T_INV_PROTO
) {
157 /* We need match for the '-p all', too! */
158 if ((ip6info
->proto
!= 0) &&
159 !(ip6info
->invflags
& IP6T_INV_PROTO
))
165 /* should be ip6 safe */
167 ip6_checkentry(const struct ip6t_ip6
*ipv6
)
169 if (ipv6
->flags
& ~IP6T_F_MASK
) {
170 duprintf("Unknown flag bits set: %08X\n",
171 ipv6
->flags
& ~IP6T_F_MASK
);
174 if (ipv6
->invflags
& ~IP6T_INV_MASK
) {
175 duprintf("Unknown invflag bits set: %08X\n",
176 ipv6
->invflags
& ~IP6T_INV_MASK
);
183 ip6t_error(struct sk_buff
*skb
, const struct xt_action_param
*par
)
185 net_info_ratelimited("error: `%s'\n", (const char *)par
->targinfo
);
/* Translate a byte offset inside a table blob into an entry pointer. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	const void *addr = base + offset;

	return (struct ip6t_entry *)addr;
}
196 /* All zeroes == unconditional rule. */
197 /* Mildly perf critical (only if packet tracing is on) */
198 static inline bool unconditional(const struct ip6t_entry
*e
)
200 static const struct ip6t_ip6 uncond
;
202 return e
->target_offset
== sizeof(struct ip6t_entry
) &&
203 memcmp(&e
->ipv6
, &uncond
, sizeof(uncond
)) == 0;
/* const-qualified wrapper around ip6t_get_target(). */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	/* ip6t_get_target() takes a non-const entry; the cast is safe
	 * because nothing is written through the result here.
	 */
	struct ip6t_entry *entry = (struct ip6t_entry *)e;

	return ip6t_get_target(entry);
}
213 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
214 /* This cries for unification! */
215 static const char *const hooknames
[] = {
216 [NF_INET_PRE_ROUTING
] = "PREROUTING",
217 [NF_INET_LOCAL_IN
] = "INPUT",
218 [NF_INET_FORWARD
] = "FORWARD",
219 [NF_INET_LOCAL_OUT
] = "OUTPUT",
220 [NF_INET_POST_ROUTING
] = "POSTROUTING",
223 enum nf_ip_trace_comments
{
224 NF_IP6_TRACE_COMMENT_RULE
,
225 NF_IP6_TRACE_COMMENT_RETURN
,
226 NF_IP6_TRACE_COMMENT_POLICY
,
229 static const char *const comments
[] = {
230 [NF_IP6_TRACE_COMMENT_RULE
] = "rule",
231 [NF_IP6_TRACE_COMMENT_RETURN
] = "return",
232 [NF_IP6_TRACE_COMMENT_POLICY
] = "policy",
235 static struct nf_loginfo trace_loginfo
= {
236 .type
= NF_LOG_TYPE_LOG
,
240 .logflags
= NF_LOG_MASK
,
245 /* Mildly perf critical (only if packet tracing is on) */
247 get_chainname_rulenum(const struct ip6t_entry
*s
, const struct ip6t_entry
*e
,
248 const char *hookname
, const char **chainname
,
249 const char **comment
, unsigned int *rulenum
)
251 const struct xt_standard_target
*t
= (void *)ip6t_get_target_c(s
);
253 if (strcmp(t
->target
.u
.kernel
.target
->name
, XT_ERROR_TARGET
) == 0) {
254 /* Head of user chain: ERROR target with chainname */
255 *chainname
= t
->target
.data
;
260 if (unconditional(s
) &&
261 strcmp(t
->target
.u
.kernel
.target
->name
,
262 XT_STANDARD_TARGET
) == 0 &&
264 /* Tail of chains: STANDARD target (return/policy) */
265 *comment
= *chainname
== hookname
266 ? comments
[NF_IP6_TRACE_COMMENT_POLICY
]
267 : comments
[NF_IP6_TRACE_COMMENT_RETURN
];
276 static void trace_packet(const struct sk_buff
*skb
,
278 const struct net_device
*in
,
279 const struct net_device
*out
,
280 const char *tablename
,
281 const struct xt_table_info
*private,
282 const struct ip6t_entry
*e
)
284 const void *table_base
;
285 const struct ip6t_entry
*root
;
286 const char *hookname
, *chainname
, *comment
;
287 const struct ip6t_entry
*iter
;
288 unsigned int rulenum
= 0;
289 struct net
*net
= dev_net(in
? in
: out
);
291 table_base
= private->entries
[smp_processor_id()];
292 root
= get_entry(table_base
, private->hook_entry
[hook
]);
294 hookname
= chainname
= hooknames
[hook
];
295 comment
= comments
[NF_IP6_TRACE_COMMENT_RULE
];
297 xt_entry_foreach(iter
, root
, private->size
- private->hook_entry
[hook
])
298 if (get_chainname_rulenum(iter
, e
, hookname
,
299 &chainname
, &comment
, &rulenum
) != 0)
302 nf_log_packet(net
, AF_INET6
, hook
, skb
, in
, out
, &trace_loginfo
,
303 "TRACE: %s:%s:%s:%u ",
304 tablename
, chainname
, comment
, rulenum
);
308 static inline __pure
struct ip6t_entry
*
309 ip6t_next_entry(const struct ip6t_entry
*entry
)
311 return (void *)entry
+ entry
->next_offset
;
314 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
316 ip6t_do_table(struct sk_buff
*skb
,
318 const struct net_device
*in
,
319 const struct net_device
*out
,
320 struct xt_table
*table
)
322 static const char nulldevname
[IFNAMSIZ
] __attribute__((aligned(sizeof(long))));
323 /* Initializing verdict to NF_DROP keeps gcc happy. */
324 unsigned int verdict
= NF_DROP
;
325 const char *indev
, *outdev
;
326 const void *table_base
;
327 struct ip6t_entry
*e
, **jumpstack
;
328 unsigned int *stackptr
, origptr
, cpu
;
329 const struct xt_table_info
*private;
330 struct xt_action_param acpar
;
334 indev
= in
? in
->name
: nulldevname
;
335 outdev
= out
? out
->name
: nulldevname
;
336 /* We handle fragments by dealing with the first fragment as
337 * if it was a normal packet. All other fragments are treated
338 * normally, except that they will NEVER match rules that ask
339 * things we don't know, ie. tcp syn flag or ports). If the
340 * rule is also a fragment-specific rule, non-fragments won't
342 acpar
.hotdrop
= false;
345 acpar
.family
= NFPROTO_IPV6
;
346 acpar
.hooknum
= hook
;
348 IP_NF_ASSERT(table
->valid_hooks
& (1 << hook
));
351 addend
= xt_write_recseq_begin();
352 private = table
->private;
353 cpu
= smp_processor_id();
354 table_base
= private->entries
[cpu
];
355 jumpstack
= (struct ip6t_entry
**)private->jumpstack
[cpu
];
356 stackptr
= per_cpu_ptr(private->stackptr
, cpu
);
359 e
= get_entry(table_base
, private->hook_entry
[hook
]);
362 const struct xt_entry_target
*t
;
363 const struct xt_entry_match
*ematch
;
367 if (!ip6_packet_match(skb
, indev
, outdev
, &e
->ipv6
,
368 &acpar
.thoff
, &acpar
.fragoff
, &acpar
.hotdrop
)) {
370 e
= ip6t_next_entry(e
);
374 xt_ematch_foreach(ematch
, e
) {
375 acpar
.match
= ematch
->u
.kernel
.match
;
376 acpar
.matchinfo
= ematch
->data
;
377 if (!acpar
.match
->match(skb
, &acpar
))
381 ADD_COUNTER(e
->counters
, skb
->len
, 1);
383 t
= ip6t_get_target_c(e
);
384 IP_NF_ASSERT(t
->u
.kernel
.target
);
386 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
387 /* The packet is traced: log it */
388 if (unlikely(skb
->nf_trace
))
389 trace_packet(skb
, hook
, in
, out
,
390 table
->name
, private, e
);
392 /* Standard target? */
393 if (!t
->u
.kernel
.target
->target
) {
396 v
= ((struct xt_standard_target
*)t
)->verdict
;
398 /* Pop from stack? */
399 if (v
!= XT_RETURN
) {
400 verdict
= (unsigned int)(-v
) - 1;
403 if (*stackptr
<= origptr
)
404 e
= get_entry(table_base
,
405 private->underflow
[hook
]);
407 e
= ip6t_next_entry(jumpstack
[--*stackptr
]);
410 if (table_base
+ v
!= ip6t_next_entry(e
) &&
411 !(e
->ipv6
.flags
& IP6T_F_GOTO
)) {
412 if (*stackptr
>= private->stacksize
) {
416 jumpstack
[(*stackptr
)++] = e
;
419 e
= get_entry(table_base
, v
);
423 acpar
.target
= t
->u
.kernel
.target
;
424 acpar
.targinfo
= t
->data
;
426 verdict
= t
->u
.kernel
.target
->target(skb
, &acpar
);
427 if (verdict
== XT_CONTINUE
)
428 e
= ip6t_next_entry(e
);
432 } while (!acpar
.hotdrop
);
436 xt_write_recseq_end(addend
);
439 #ifdef DEBUG_ALLOW_ALL
448 /* Figures out from what hook each rule can be called: returns 0 if
449 there are loops. Puts hook bitmask in comefrom. */
451 mark_source_chains(const struct xt_table_info
*newinfo
,
452 unsigned int valid_hooks
, void *entry0
)
456 /* No recursion; use packet counter to save back ptrs (reset
457 to 0 as we leave), and comefrom to save source hook bitmask */
458 for (hook
= 0; hook
< NF_INET_NUMHOOKS
; hook
++) {
459 unsigned int pos
= newinfo
->hook_entry
[hook
];
460 struct ip6t_entry
*e
= (struct ip6t_entry
*)(entry0
+ pos
);
462 if (!(valid_hooks
& (1 << hook
)))
465 /* Set initial back pointer. */
466 e
->counters
.pcnt
= pos
;
469 const struct xt_standard_target
*t
470 = (void *)ip6t_get_target_c(e
);
471 int visited
= e
->comefrom
& (1 << hook
);
473 if (e
->comefrom
& (1 << NF_INET_NUMHOOKS
)) {
474 pr_err("iptables: loop hook %u pos %u %08X.\n",
475 hook
, pos
, e
->comefrom
);
478 e
->comefrom
|= ((1 << hook
) | (1 << NF_INET_NUMHOOKS
));
480 /* Unconditional return/END. */
481 if ((unconditional(e
) &&
482 (strcmp(t
->target
.u
.user
.name
,
483 XT_STANDARD_TARGET
) == 0) &&
484 t
->verdict
< 0) || visited
) {
485 unsigned int oldpos
, size
;
487 if ((strcmp(t
->target
.u
.user
.name
,
488 XT_STANDARD_TARGET
) == 0) &&
489 t
->verdict
< -NF_MAX_VERDICT
- 1) {
490 duprintf("mark_source_chains: bad "
491 "negative verdict (%i)\n",
496 /* Return: backtrack through the last
499 e
->comefrom
^= (1<<NF_INET_NUMHOOKS
);
500 #ifdef DEBUG_IP_FIREWALL_USER
502 & (1 << NF_INET_NUMHOOKS
)) {
503 duprintf("Back unset "
510 pos
= e
->counters
.pcnt
;
511 e
->counters
.pcnt
= 0;
513 /* We're at the start. */
517 e
= (struct ip6t_entry
*)
519 } while (oldpos
== pos
+ e
->next_offset
);
522 size
= e
->next_offset
;
523 e
= (struct ip6t_entry
*)
524 (entry0
+ pos
+ size
);
525 e
->counters
.pcnt
= pos
;
528 int newpos
= t
->verdict
;
530 if (strcmp(t
->target
.u
.user
.name
,
531 XT_STANDARD_TARGET
) == 0 &&
533 if (newpos
> newinfo
->size
-
534 sizeof(struct ip6t_entry
)) {
535 duprintf("mark_source_chains: "
536 "bad verdict (%i)\n",
540 /* This a jump; chase it. */
541 duprintf("Jump rule %u -> %u\n",
544 /* ... this is a fallthru */
545 newpos
= pos
+ e
->next_offset
;
547 e
= (struct ip6t_entry
*)
549 e
->counters
.pcnt
= pos
;
554 duprintf("Finished chain %u\n", hook
);
559 static void cleanup_match(struct xt_entry_match
*m
, struct net
*net
)
561 struct xt_mtdtor_param par
;
564 par
.match
= m
->u
.kernel
.match
;
565 par
.matchinfo
= m
->data
;
566 par
.family
= NFPROTO_IPV6
;
567 if (par
.match
->destroy
!= NULL
)
568 par
.match
->destroy(&par
);
569 module_put(par
.match
->me
);
573 check_entry(const struct ip6t_entry
*e
)
575 const struct xt_entry_target
*t
;
577 if (!ip6_checkentry(&e
->ipv6
))
580 if (e
->target_offset
+ sizeof(struct xt_entry_target
) >
584 t
= ip6t_get_target_c(e
);
585 if (e
->target_offset
+ t
->u
.target_size
> e
->next_offset
)
592 static int check_match(struct xt_entry_match
*m
, struct xt_mtchk_param
*par
)
594 const struct ip6t_ip6
*ipv6
= par
->entryinfo
;
597 par
->match
= m
->u
.kernel
.match
;
598 par
->matchinfo
= m
->data
;
600 ret
= xt_check_match(par
, m
->u
.match_size
- sizeof(*m
),
601 ipv6
->proto
, ipv6
->invflags
& IP6T_INV_PROTO
);
603 duprintf("ip_tables: check failed for `%s'.\n",
611 find_check_match(struct xt_entry_match
*m
, struct xt_mtchk_param
*par
)
613 struct xt_match
*match
;
616 match
= xt_request_find_match(NFPROTO_IPV6
, m
->u
.user
.name
,
619 duprintf("find_check_match: `%s' not found\n", m
->u
.user
.name
);
620 return PTR_ERR(match
);
622 m
->u
.kernel
.match
= match
;
624 ret
= check_match(m
, par
);
630 module_put(m
->u
.kernel
.match
->me
);
634 static int check_target(struct ip6t_entry
*e
, struct net
*net
, const char *name
)
636 struct xt_entry_target
*t
= ip6t_get_target(e
);
637 struct xt_tgchk_param par
= {
641 .target
= t
->u
.kernel
.target
,
643 .hook_mask
= e
->comefrom
,
644 .family
= NFPROTO_IPV6
,
648 t
= ip6t_get_target(e
);
649 ret
= xt_check_target(&par
, t
->u
.target_size
- sizeof(*t
),
650 e
->ipv6
.proto
, e
->ipv6
.invflags
& IP6T_INV_PROTO
);
652 duprintf("ip_tables: check failed for `%s'.\n",
653 t
->u
.kernel
.target
->name
);
660 find_check_entry(struct ip6t_entry
*e
, struct net
*net
, const char *name
,
663 struct xt_entry_target
*t
;
664 struct xt_target
*target
;
667 struct xt_mtchk_param mtpar
;
668 struct xt_entry_match
*ematch
;
673 mtpar
.entryinfo
= &e
->ipv6
;
674 mtpar
.hook_mask
= e
->comefrom
;
675 mtpar
.family
= NFPROTO_IPV6
;
676 xt_ematch_foreach(ematch
, e
) {
677 ret
= find_check_match(ematch
, &mtpar
);
679 goto cleanup_matches
;
683 t
= ip6t_get_target(e
);
684 target
= xt_request_find_target(NFPROTO_IPV6
, t
->u
.user
.name
,
686 if (IS_ERR(target
)) {
687 duprintf("find_check_entry: `%s' not found\n", t
->u
.user
.name
);
688 ret
= PTR_ERR(target
);
689 goto cleanup_matches
;
691 t
->u
.kernel
.target
= target
;
693 ret
= check_target(e
, net
, name
);
698 module_put(t
->u
.kernel
.target
->me
);
700 xt_ematch_foreach(ematch
, e
) {
703 cleanup_match(ematch
, net
);
708 static bool check_underflow(const struct ip6t_entry
*e
)
710 const struct xt_entry_target
*t
;
711 unsigned int verdict
;
713 if (!unconditional(e
))
715 t
= ip6t_get_target_c(e
);
716 if (strcmp(t
->u
.user
.name
, XT_STANDARD_TARGET
) != 0)
718 verdict
= ((struct xt_standard_target
*)t
)->verdict
;
719 verdict
= -verdict
- 1;
720 return verdict
== NF_DROP
|| verdict
== NF_ACCEPT
;
724 check_entry_size_and_hooks(struct ip6t_entry
*e
,
725 struct xt_table_info
*newinfo
,
726 const unsigned char *base
,
727 const unsigned char *limit
,
728 const unsigned int *hook_entries
,
729 const unsigned int *underflows
,
730 unsigned int valid_hooks
)
735 if ((unsigned long)e
% __alignof__(struct ip6t_entry
) != 0 ||
736 (unsigned char *)e
+ sizeof(struct ip6t_entry
) >= limit
||
737 (unsigned char *)e
+ e
->next_offset
> limit
) {
738 duprintf("Bad offset %p\n", e
);
743 < sizeof(struct ip6t_entry
) + sizeof(struct xt_entry_target
)) {
744 duprintf("checking: element %p size %u\n",
749 err
= check_entry(e
);
753 /* Check hooks & underflows */
754 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
755 if (!(valid_hooks
& (1 << h
)))
757 if ((unsigned char *)e
- base
== hook_entries
[h
])
758 newinfo
->hook_entry
[h
] = hook_entries
[h
];
759 if ((unsigned char *)e
- base
== underflows
[h
]) {
760 if (!check_underflow(e
)) {
761 pr_err("Underflows must be unconditional and "
762 "use the STANDARD target with "
766 newinfo
->underflow
[h
] = underflows
[h
];
770 /* Clear counters and comefrom */
771 e
->counters
= ((struct xt_counters
) { 0, 0 });
776 static void cleanup_entry(struct ip6t_entry
*e
, struct net
*net
)
778 struct xt_tgdtor_param par
;
779 struct xt_entry_target
*t
;
780 struct xt_entry_match
*ematch
;
782 /* Cleanup all matches */
783 xt_ematch_foreach(ematch
, e
)
784 cleanup_match(ematch
, net
);
785 t
= ip6t_get_target(e
);
788 par
.target
= t
->u
.kernel
.target
;
789 par
.targinfo
= t
->data
;
790 par
.family
= NFPROTO_IPV6
;
791 if (par
.target
->destroy
!= NULL
)
792 par
.target
->destroy(&par
);
793 module_put(par
.target
->me
);
796 /* Checks and translates the user-supplied table segment (held in
799 translate_table(struct net
*net
, struct xt_table_info
*newinfo
, void *entry0
,
800 const struct ip6t_replace
*repl
)
802 struct ip6t_entry
*iter
;
806 newinfo
->size
= repl
->size
;
807 newinfo
->number
= repl
->num_entries
;
809 /* Init all hooks to impossible value. */
810 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
811 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
812 newinfo
->underflow
[i
] = 0xFFFFFFFF;
815 duprintf("translate_table: size %u\n", newinfo
->size
);
817 /* Walk through entries, checking offsets. */
818 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
819 ret
= check_entry_size_and_hooks(iter
, newinfo
, entry0
,
827 if (strcmp(ip6t_get_target(iter
)->u
.user
.name
,
828 XT_ERROR_TARGET
) == 0)
829 ++newinfo
->stacksize
;
832 if (i
!= repl
->num_entries
) {
833 duprintf("translate_table: %u not %u entries\n",
834 i
, repl
->num_entries
);
838 /* Check hooks all assigned */
839 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
840 /* Only hooks which are valid */
841 if (!(repl
->valid_hooks
& (1 << i
)))
843 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
844 duprintf("Invalid hook entry %u %u\n",
845 i
, repl
->hook_entry
[i
]);
848 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
849 duprintf("Invalid underflow %u %u\n",
850 i
, repl
->underflow
[i
]);
855 if (!mark_source_chains(newinfo
, repl
->valid_hooks
, entry0
))
858 /* Finally, each sanity check must pass */
860 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
861 ret
= find_check_entry(iter
, net
, repl
->name
, repl
->size
);
868 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
871 cleanup_entry(iter
, net
);
876 /* And one copy for every other CPU */
877 for_each_possible_cpu(i
) {
878 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry0
)
879 memcpy(newinfo
->entries
[i
], entry0
, newinfo
->size
);
886 get_counters(const struct xt_table_info
*t
,
887 struct xt_counters counters
[])
889 struct ip6t_entry
*iter
;
893 for_each_possible_cpu(cpu
) {
894 seqcount_t
*s
= &per_cpu(xt_recseq
, cpu
);
897 xt_entry_foreach(iter
, t
->entries
[cpu
], t
->size
) {
902 start
= read_seqcount_begin(s
);
903 bcnt
= iter
->counters
.bcnt
;
904 pcnt
= iter
->counters
.pcnt
;
905 } while (read_seqcount_retry(s
, start
));
907 ADD_COUNTER(counters
[i
], bcnt
, pcnt
);
913 static struct xt_counters
*alloc_counters(const struct xt_table
*table
)
915 unsigned int countersize
;
916 struct xt_counters
*counters
;
917 const struct xt_table_info
*private = table
->private;
919 /* We need atomic snapshot of counters: rest doesn't change
920 (other than comefrom, which userspace doesn't care
922 countersize
= sizeof(struct xt_counters
) * private->number
;
923 counters
= vzalloc(countersize
);
925 if (counters
== NULL
)
926 return ERR_PTR(-ENOMEM
);
928 get_counters(private, counters
);
934 copy_entries_to_user(unsigned int total_size
,
935 const struct xt_table
*table
,
936 void __user
*userptr
)
938 unsigned int off
, num
;
939 const struct ip6t_entry
*e
;
940 struct xt_counters
*counters
;
941 const struct xt_table_info
*private = table
->private;
943 const void *loc_cpu_entry
;
945 counters
= alloc_counters(table
);
946 if (IS_ERR(counters
))
947 return PTR_ERR(counters
);
949 /* choose the copy that is on our node/cpu, ...
950 * This choice is lazy (because current thread is
951 * allowed to migrate to another cpu)
953 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
954 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
959 /* FIXME: use iterator macros --RR */
960 /* ... then go back and fix counters and names */
961 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
963 const struct xt_entry_match
*m
;
964 const struct xt_entry_target
*t
;
966 e
= (struct ip6t_entry
*)(loc_cpu_entry
+ off
);
967 if (copy_to_user(userptr
+ off
968 + offsetof(struct ip6t_entry
, counters
),
970 sizeof(counters
[num
])) != 0) {
975 for (i
= sizeof(struct ip6t_entry
);
976 i
< e
->target_offset
;
977 i
+= m
->u
.match_size
) {
980 if (copy_to_user(userptr
+ off
+ i
981 + offsetof(struct xt_entry_match
,
983 m
->u
.kernel
.match
->name
,
984 strlen(m
->u
.kernel
.match
->name
)+1)
991 t
= ip6t_get_target_c(e
);
992 if (copy_to_user(userptr
+ off
+ e
->target_offset
993 + offsetof(struct xt_entry_target
,
995 t
->u
.kernel
.target
->name
,
996 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
1007 #ifdef CONFIG_COMPAT
1008 static void compat_standard_from_user(void *dst
, const void *src
)
1010 int v
= *(compat_int_t
*)src
;
1013 v
+= xt_compat_calc_jump(AF_INET6
, v
);
1014 memcpy(dst
, &v
, sizeof(v
));
1017 static int compat_standard_to_user(void __user
*dst
, const void *src
)
1019 compat_int_t cv
= *(int *)src
;
1022 cv
-= xt_compat_calc_jump(AF_INET6
, cv
);
1023 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
1026 static int compat_calc_entry(const struct ip6t_entry
*e
,
1027 const struct xt_table_info
*info
,
1028 const void *base
, struct xt_table_info
*newinfo
)
1030 const struct xt_entry_match
*ematch
;
1031 const struct xt_entry_target
*t
;
1032 unsigned int entry_offset
;
1035 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1036 entry_offset
= (void *)e
- base
;
1037 xt_ematch_foreach(ematch
, e
)
1038 off
+= xt_compat_match_offset(ematch
->u
.kernel
.match
);
1039 t
= ip6t_get_target_c(e
);
1040 off
+= xt_compat_target_offset(t
->u
.kernel
.target
);
1041 newinfo
->size
-= off
;
1042 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1046 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1047 if (info
->hook_entry
[i
] &&
1048 (e
< (struct ip6t_entry
*)(base
+ info
->hook_entry
[i
])))
1049 newinfo
->hook_entry
[i
] -= off
;
1050 if (info
->underflow
[i
] &&
1051 (e
< (struct ip6t_entry
*)(base
+ info
->underflow
[i
])))
1052 newinfo
->underflow
[i
] -= off
;
1057 static int compat_table_info(const struct xt_table_info
*info
,
1058 struct xt_table_info
*newinfo
)
1060 struct ip6t_entry
*iter
;
1061 void *loc_cpu_entry
;
1064 if (!newinfo
|| !info
)
1067 /* we dont care about newinfo->entries[] */
1068 memcpy(newinfo
, info
, offsetof(struct xt_table_info
, entries
));
1069 newinfo
->initial_entries
= 0;
1070 loc_cpu_entry
= info
->entries
[raw_smp_processor_id()];
1071 xt_compat_init_offsets(AF_INET6
, info
->number
);
1072 xt_entry_foreach(iter
, loc_cpu_entry
, info
->size
) {
1073 ret
= compat_calc_entry(iter
, info
, loc_cpu_entry
, newinfo
);
1081 static int get_info(struct net
*net
, void __user
*user
,
1082 const int *len
, int compat
)
1084 char name
[XT_TABLE_MAXNAMELEN
];
1088 if (*len
!= sizeof(struct ip6t_getinfo
)) {
1089 duprintf("length %u != %zu\n", *len
,
1090 sizeof(struct ip6t_getinfo
));
1094 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
1097 name
[XT_TABLE_MAXNAMELEN
-1] = '\0';
1098 #ifdef CONFIG_COMPAT
1100 xt_compat_lock(AF_INET6
);
1102 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1103 "ip6table_%s", name
);
1104 if (!IS_ERR_OR_NULL(t
)) {
1105 struct ip6t_getinfo info
;
1106 const struct xt_table_info
*private = t
->private;
1107 #ifdef CONFIG_COMPAT
1108 struct xt_table_info tmp
;
1111 ret
= compat_table_info(private, &tmp
);
1112 xt_compat_flush_offsets(AF_INET6
);
1116 memset(&info
, 0, sizeof(info
));
1117 info
.valid_hooks
= t
->valid_hooks
;
1118 memcpy(info
.hook_entry
, private->hook_entry
,
1119 sizeof(info
.hook_entry
));
1120 memcpy(info
.underflow
, private->underflow
,
1121 sizeof(info
.underflow
));
1122 info
.num_entries
= private->number
;
1123 info
.size
= private->size
;
1124 strcpy(info
.name
, name
);
1126 if (copy_to_user(user
, &info
, *len
) != 0)
1134 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1135 #ifdef CONFIG_COMPAT
1137 xt_compat_unlock(AF_INET6
);
1143 get_entries(struct net
*net
, struct ip6t_get_entries __user
*uptr
,
1147 struct ip6t_get_entries get
;
1150 if (*len
< sizeof(get
)) {
1151 duprintf("get_entries: %u < %zu\n", *len
, sizeof(get
));
1154 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1156 if (*len
!= sizeof(struct ip6t_get_entries
) + get
.size
) {
1157 duprintf("get_entries: %u != %zu\n",
1158 *len
, sizeof(get
) + get
.size
);
1162 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1163 if (!IS_ERR_OR_NULL(t
)) {
1164 struct xt_table_info
*private = t
->private;
1165 duprintf("t->private->number = %u\n", private->number
);
1166 if (get
.size
== private->size
)
1167 ret
= copy_entries_to_user(private->size
,
1168 t
, uptr
->entrytable
);
1170 duprintf("get_entries: I've got %u not %u!\n",
1171 private->size
, get
.size
);
1177 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1183 __do_replace(struct net
*net
, const char *name
, unsigned int valid_hooks
,
1184 struct xt_table_info
*newinfo
, unsigned int num_counters
,
1185 void __user
*counters_ptr
)
1189 struct xt_table_info
*oldinfo
;
1190 struct xt_counters
*counters
;
1191 const void *loc_cpu_old_entry
;
1192 struct ip6t_entry
*iter
;
1195 counters
= vzalloc(num_counters
* sizeof(struct xt_counters
));
1201 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1202 "ip6table_%s", name
);
1203 if (IS_ERR_OR_NULL(t
)) {
1204 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1205 goto free_newinfo_counters_untrans
;
1209 if (valid_hooks
!= t
->valid_hooks
) {
1210 duprintf("Valid hook crap: %08X vs %08X\n",
1211 valid_hooks
, t
->valid_hooks
);
1216 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
1220 /* Update module usage count based on number of rules */
1221 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1222 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
1223 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
1224 (newinfo
->number
<= oldinfo
->initial_entries
))
1226 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
1227 (newinfo
->number
<= oldinfo
->initial_entries
))
1230 /* Get the old counters, and synchronize with replace */
1231 get_counters(oldinfo
, counters
);
1233 /* Decrease module usage counts and free resource */
1234 loc_cpu_old_entry
= oldinfo
->entries
[raw_smp_processor_id()];
1235 xt_entry_foreach(iter
, loc_cpu_old_entry
, oldinfo
->size
)
1236 cleanup_entry(iter
, net
);
1238 xt_free_table_info(oldinfo
);
1239 if (copy_to_user(counters_ptr
, counters
,
1240 sizeof(struct xt_counters
) * num_counters
) != 0) {
1241 /* Silent error, can't fail, new table is already in place */
1242 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1251 free_newinfo_counters_untrans
:
1258 do_replace(struct net
*net
, const void __user
*user
, unsigned int len
)
1261 struct ip6t_replace tmp
;
1262 struct xt_table_info
*newinfo
;
1263 void *loc_cpu_entry
;
1264 struct ip6t_entry
*iter
;
1266 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1269 /* overflow check */
1270 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1272 tmp
.name
[sizeof(tmp
.name
)-1] = 0;
1274 newinfo
= xt_alloc_table_info(tmp
.size
);
1278 /* choose the copy that is on our node/cpu */
1279 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1280 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1286 ret
= translate_table(net
, newinfo
, loc_cpu_entry
, &tmp
);
1290 duprintf("ip_tables: Translated table\n");
1292 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1293 tmp
.num_counters
, tmp
.counters
);
1295 goto free_newinfo_untrans
;
1298 free_newinfo_untrans
:
1299 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1300 cleanup_entry(iter
, net
);
1302 xt_free_table_info(newinfo
);
1307 do_add_counters(struct net
*net
, const void __user
*user
, unsigned int len
,
1310 unsigned int i
, curcpu
;
1311 struct xt_counters_info tmp
;
1312 struct xt_counters
*paddc
;
1313 unsigned int num_counters
;
1318 const struct xt_table_info
*private;
1320 const void *loc_cpu_entry
;
1321 struct ip6t_entry
*iter
;
1322 unsigned int addend
;
1323 #ifdef CONFIG_COMPAT
1324 struct compat_xt_counters_info compat_tmp
;
1328 size
= sizeof(struct compat_xt_counters_info
);
1333 size
= sizeof(struct xt_counters_info
);
1336 if (copy_from_user(ptmp
, user
, size
) != 0)
1339 #ifdef CONFIG_COMPAT
1341 num_counters
= compat_tmp
.num_counters
;
1342 name
= compat_tmp
.name
;
1346 num_counters
= tmp
.num_counters
;
1350 if (len
!= size
+ num_counters
* sizeof(struct xt_counters
))
1353 paddc
= vmalloc(len
- size
);
1357 if (copy_from_user(paddc
, user
+ size
, len
- size
) != 0) {
1362 t
= xt_find_table_lock(net
, AF_INET6
, name
);
1363 if (IS_ERR_OR_NULL(t
)) {
1364 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1370 private = t
->private;
1371 if (private->number
!= num_counters
) {
1373 goto unlock_up_free
;
1377 /* Choose the copy that is on our node */
1378 curcpu
= smp_processor_id();
1379 addend
= xt_write_recseq_begin();
1380 loc_cpu_entry
= private->entries
[curcpu
];
1381 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
) {
1382 ADD_COUNTER(iter
->counters
, paddc
[i
].bcnt
, paddc
[i
].pcnt
);
1385 xt_write_recseq_end(addend
);
1397 #ifdef CONFIG_COMPAT
1398 struct compat_ip6t_replace
{
1399 char name
[XT_TABLE_MAXNAMELEN
];
1403 u32 hook_entry
[NF_INET_NUMHOOKS
];
1404 u32 underflow
[NF_INET_NUMHOOKS
];
1406 compat_uptr_t counters
; /* struct xt_counters * */
1407 struct compat_ip6t_entry entries
[0];
1411 compat_copy_entry_to_user(struct ip6t_entry
*e
, void __user
**dstptr
,
1412 unsigned int *size
, struct xt_counters
*counters
,
1415 struct xt_entry_target
*t
;
1416 struct compat_ip6t_entry __user
*ce
;
1417 u_int16_t target_offset
, next_offset
;
1418 compat_uint_t origsize
;
1419 const struct xt_entry_match
*ematch
;
1423 ce
= (struct compat_ip6t_entry __user
*)*dstptr
;
1424 if (copy_to_user(ce
, e
, sizeof(struct ip6t_entry
)) != 0 ||
1425 copy_to_user(&ce
->counters
, &counters
[i
],
1426 sizeof(counters
[i
])) != 0)
1429 *dstptr
+= sizeof(struct compat_ip6t_entry
);
1430 *size
-= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1432 xt_ematch_foreach(ematch
, e
) {
1433 ret
= xt_compat_match_to_user(ematch
, dstptr
, size
);
1437 target_offset
= e
->target_offset
- (origsize
- *size
);
1438 t
= ip6t_get_target(e
);
1439 ret
= xt_compat_target_to_user(t
, dstptr
, size
);
1442 next_offset
= e
->next_offset
- (origsize
- *size
);
1443 if (put_user(target_offset
, &ce
->target_offset
) != 0 ||
1444 put_user(next_offset
, &ce
->next_offset
) != 0)
1450 compat_find_calc_match(struct xt_entry_match
*m
,
1452 const struct ip6t_ip6
*ipv6
,
1453 unsigned int hookmask
,
1456 struct xt_match
*match
;
1458 match
= xt_request_find_match(NFPROTO_IPV6
, m
->u
.user
.name
,
1459 m
->u
.user
.revision
);
1460 if (IS_ERR(match
)) {
1461 duprintf("compat_check_calc_match: `%s' not found\n",
1463 return PTR_ERR(match
);
1465 m
->u
.kernel
.match
= match
;
1466 *size
+= xt_compat_match_offset(match
);
1470 static void compat_release_entry(struct compat_ip6t_entry
*e
)
1472 struct xt_entry_target
*t
;
1473 struct xt_entry_match
*ematch
;
1475 /* Cleanup all matches */
1476 xt_ematch_foreach(ematch
, e
)
1477 module_put(ematch
->u
.kernel
.match
->me
);
1478 t
= compat_ip6t_get_target(e
);
1479 module_put(t
->u
.kernel
.target
->me
);
1483 check_compat_entry_size_and_hooks(struct compat_ip6t_entry
*e
,
1484 struct xt_table_info
*newinfo
,
1486 const unsigned char *base
,
1487 const unsigned char *limit
,
1488 const unsigned int *hook_entries
,
1489 const unsigned int *underflows
,
1492 struct xt_entry_match
*ematch
;
1493 struct xt_entry_target
*t
;
1494 struct xt_target
*target
;
1495 unsigned int entry_offset
;
1499 duprintf("check_compat_entry_size_and_hooks %p\n", e
);
1500 if ((unsigned long)e
% __alignof__(struct compat_ip6t_entry
) != 0 ||
1501 (unsigned char *)e
+ sizeof(struct compat_ip6t_entry
) >= limit
||
1502 (unsigned char *)e
+ e
->next_offset
> limit
) {
1503 duprintf("Bad offset %p, limit = %p\n", e
, limit
);
1507 if (e
->next_offset
< sizeof(struct compat_ip6t_entry
) +
1508 sizeof(struct compat_xt_entry_target
)) {
1509 duprintf("checking: element %p size %u\n",
1514 /* For purposes of check_entry casting the compat entry is fine */
1515 ret
= check_entry((struct ip6t_entry
*)e
);
1519 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1520 entry_offset
= (void *)e
- (void *)base
;
1522 xt_ematch_foreach(ematch
, e
) {
1523 ret
= compat_find_calc_match(ematch
, name
,
1524 &e
->ipv6
, e
->comefrom
, &off
);
1526 goto release_matches
;
1530 t
= compat_ip6t_get_target(e
);
1531 target
= xt_request_find_target(NFPROTO_IPV6
, t
->u
.user
.name
,
1532 t
->u
.user
.revision
);
1533 if (IS_ERR(target
)) {
1534 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1536 ret
= PTR_ERR(target
);
1537 goto release_matches
;
1539 t
->u
.kernel
.target
= target
;
1541 off
+= xt_compat_target_offset(target
);
1543 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1547 /* Check hooks & underflows */
1548 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1549 if ((unsigned char *)e
- base
== hook_entries
[h
])
1550 newinfo
->hook_entry
[h
] = hook_entries
[h
];
1551 if ((unsigned char *)e
- base
== underflows
[h
])
1552 newinfo
->underflow
[h
] = underflows
[h
];
1555 /* Clear counters and comefrom */
1556 memset(&e
->counters
, 0, sizeof(e
->counters
));
1561 module_put(t
->u
.kernel
.target
->me
);
1563 xt_ematch_foreach(ematch
, e
) {
1566 module_put(ematch
->u
.kernel
.match
->me
);
1572 compat_copy_entry_from_user(struct compat_ip6t_entry
*e
, void **dstptr
,
1573 unsigned int *size
, const char *name
,
1574 struct xt_table_info
*newinfo
, unsigned char *base
)
1576 struct xt_entry_target
*t
;
1577 struct ip6t_entry
*de
;
1578 unsigned int origsize
;
1580 struct xt_entry_match
*ematch
;
1584 de
= (struct ip6t_entry
*)*dstptr
;
1585 memcpy(de
, e
, sizeof(struct ip6t_entry
));
1586 memcpy(&de
->counters
, &e
->counters
, sizeof(e
->counters
));
1588 *dstptr
+= sizeof(struct ip6t_entry
);
1589 *size
+= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1591 xt_ematch_foreach(ematch
, e
) {
1592 ret
= xt_compat_match_from_user(ematch
, dstptr
, size
);
1596 de
->target_offset
= e
->target_offset
- (origsize
- *size
);
1597 t
= compat_ip6t_get_target(e
);
1598 xt_compat_target_from_user(t
, dstptr
, size
);
1600 de
->next_offset
= e
->next_offset
- (origsize
- *size
);
1601 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1602 if ((unsigned char *)de
- base
< newinfo
->hook_entry
[h
])
1603 newinfo
->hook_entry
[h
] -= origsize
- *size
;
1604 if ((unsigned char *)de
- base
< newinfo
->underflow
[h
])
1605 newinfo
->underflow
[h
] -= origsize
- *size
;
1610 static int compat_check_entry(struct ip6t_entry
*e
, struct net
*net
,
1615 struct xt_mtchk_param mtpar
;
1616 struct xt_entry_match
*ematch
;
1621 mtpar
.entryinfo
= &e
->ipv6
;
1622 mtpar
.hook_mask
= e
->comefrom
;
1623 mtpar
.family
= NFPROTO_IPV6
;
1624 xt_ematch_foreach(ematch
, e
) {
1625 ret
= check_match(ematch
, &mtpar
);
1627 goto cleanup_matches
;
1631 ret
= check_target(e
, net
, name
);
1633 goto cleanup_matches
;
1637 xt_ematch_foreach(ematch
, e
) {
1640 cleanup_match(ematch
, net
);
1646 translate_compat_table(struct net
*net
,
1648 unsigned int valid_hooks
,
1649 struct xt_table_info
**pinfo
,
1651 unsigned int total_size
,
1652 unsigned int number
,
1653 unsigned int *hook_entries
,
1654 unsigned int *underflows
)
1657 struct xt_table_info
*newinfo
, *info
;
1658 void *pos
, *entry0
, *entry1
;
1659 struct compat_ip6t_entry
*iter0
;
1660 struct ip6t_entry
*iter1
;
1667 info
->number
= number
;
1669 /* Init all hooks to impossible value. */
1670 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1671 info
->hook_entry
[i
] = 0xFFFFFFFF;
1672 info
->underflow
[i
] = 0xFFFFFFFF;
1675 duprintf("translate_compat_table: size %u\n", info
->size
);
1677 xt_compat_lock(AF_INET6
);
1678 xt_compat_init_offsets(AF_INET6
, number
);
1679 /* Walk through entries, checking offsets. */
1680 xt_entry_foreach(iter0
, entry0
, total_size
) {
1681 ret
= check_compat_entry_size_and_hooks(iter0
, info
, &size
,
1683 entry0
+ total_size
,
1694 duprintf("translate_compat_table: %u not %u entries\n",
1699 /* Check hooks all assigned */
1700 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1701 /* Only hooks which are valid */
1702 if (!(valid_hooks
& (1 << i
)))
1704 if (info
->hook_entry
[i
] == 0xFFFFFFFF) {
1705 duprintf("Invalid hook entry %u %u\n",
1706 i
, hook_entries
[i
]);
1709 if (info
->underflow
[i
] == 0xFFFFFFFF) {
1710 duprintf("Invalid underflow %u %u\n",
1717 newinfo
= xt_alloc_table_info(size
);
1721 newinfo
->number
= number
;
1722 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1723 newinfo
->hook_entry
[i
] = info
->hook_entry
[i
];
1724 newinfo
->underflow
[i
] = info
->underflow
[i
];
1726 entry1
= newinfo
->entries
[raw_smp_processor_id()];
1729 xt_entry_foreach(iter0
, entry0
, total_size
) {
1730 ret
= compat_copy_entry_from_user(iter0
, &pos
, &size
,
1731 name
, newinfo
, entry1
);
1735 xt_compat_flush_offsets(AF_INET6
);
1736 xt_compat_unlock(AF_INET6
);
1741 if (!mark_source_chains(newinfo
, valid_hooks
, entry1
))
1745 xt_entry_foreach(iter1
, entry1
, newinfo
->size
) {
1746 ret
= compat_check_entry(iter1
, net
, name
);
1750 if (strcmp(ip6t_get_target(iter1
)->u
.user
.name
,
1751 XT_ERROR_TARGET
) == 0)
1752 ++newinfo
->stacksize
;
1756 * The first i matches need cleanup_entry (calls ->destroy)
1757 * because they had called ->check already. The other j-i
1758 * entries need only release.
1762 xt_entry_foreach(iter0
, entry0
, newinfo
->size
) {
1767 compat_release_entry(iter0
);
1769 xt_entry_foreach(iter1
, entry1
, newinfo
->size
) {
1772 cleanup_entry(iter1
, net
);
1774 xt_free_table_info(newinfo
);
1778 /* And one copy for every other CPU */
1779 for_each_possible_cpu(i
)
1780 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry1
)
1781 memcpy(newinfo
->entries
[i
], entry1
, newinfo
->size
);
1785 xt_free_table_info(info
);
1789 xt_free_table_info(newinfo
);
1791 xt_entry_foreach(iter0
, entry0
, total_size
) {
1794 compat_release_entry(iter0
);
1798 xt_compat_flush_offsets(AF_INET6
);
1799 xt_compat_unlock(AF_INET6
);
1804 compat_do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1807 struct compat_ip6t_replace tmp
;
1808 struct xt_table_info
*newinfo
;
1809 void *loc_cpu_entry
;
1810 struct ip6t_entry
*iter
;
1812 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1815 /* overflow check */
1816 if (tmp
.size
>= INT_MAX
/ num_possible_cpus())
1818 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1820 tmp
.name
[sizeof(tmp
.name
)-1] = 0;
1822 newinfo
= xt_alloc_table_info(tmp
.size
);
1826 /* choose the copy that is on our node/cpu */
1827 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1828 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1834 ret
= translate_compat_table(net
, tmp
.name
, tmp
.valid_hooks
,
1835 &newinfo
, &loc_cpu_entry
, tmp
.size
,
1836 tmp
.num_entries
, tmp
.hook_entry
,
1841 duprintf("compat_do_replace: Translated table\n");
1843 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1844 tmp
.num_counters
, compat_ptr(tmp
.counters
));
1846 goto free_newinfo_untrans
;
1849 free_newinfo_untrans
:
1850 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1851 cleanup_entry(iter
, net
);
1853 xt_free_table_info(newinfo
);
1858 compat_do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1863 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
1867 case IP6T_SO_SET_REPLACE
:
1868 ret
= compat_do_replace(sock_net(sk
), user
, len
);
1871 case IP6T_SO_SET_ADD_COUNTERS
:
1872 ret
= do_add_counters(sock_net(sk
), user
, len
, 1);
1876 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
1883 struct compat_ip6t_get_entries
{
1884 char name
[XT_TABLE_MAXNAMELEN
];
1886 struct compat_ip6t_entry entrytable
[0];
1890 compat_copy_entries_to_user(unsigned int total_size
, struct xt_table
*table
,
1891 void __user
*userptr
)
1893 struct xt_counters
*counters
;
1894 const struct xt_table_info
*private = table
->private;
1898 const void *loc_cpu_entry
;
1900 struct ip6t_entry
*iter
;
1902 counters
= alloc_counters(table
);
1903 if (IS_ERR(counters
))
1904 return PTR_ERR(counters
);
1906 /* choose the copy that is on our node/cpu, ...
1907 * This choice is lazy (because current thread is
1908 * allowed to migrate to another cpu)
1910 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1913 xt_entry_foreach(iter
, loc_cpu_entry
, total_size
) {
1914 ret
= compat_copy_entry_to_user(iter
, &pos
,
1915 &size
, counters
, i
++);
1925 compat_get_entries(struct net
*net
, struct compat_ip6t_get_entries __user
*uptr
,
1929 struct compat_ip6t_get_entries get
;
1932 if (*len
< sizeof(get
)) {
1933 duprintf("compat_get_entries: %u < %zu\n", *len
, sizeof(get
));
1937 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1940 if (*len
!= sizeof(struct compat_ip6t_get_entries
) + get
.size
) {
1941 duprintf("compat_get_entries: %u != %zu\n",
1942 *len
, sizeof(get
) + get
.size
);
1946 xt_compat_lock(AF_INET6
);
1947 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1948 if (!IS_ERR_OR_NULL(t
)) {
1949 const struct xt_table_info
*private = t
->private;
1950 struct xt_table_info info
;
1951 duprintf("t->private->number = %u\n", private->number
);
1952 ret
= compat_table_info(private, &info
);
1953 if (!ret
&& get
.size
== info
.size
) {
1954 ret
= compat_copy_entries_to_user(private->size
,
1955 t
, uptr
->entrytable
);
1957 duprintf("compat_get_entries: I've got %u not %u!\n",
1958 private->size
, get
.size
);
1961 xt_compat_flush_offsets(AF_INET6
);
1965 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1967 xt_compat_unlock(AF_INET6
);
1971 static int do_ip6t_get_ctl(struct sock
*, int, void __user
*, int *);
1974 compat_do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1978 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
1982 case IP6T_SO_GET_INFO
:
1983 ret
= get_info(sock_net(sk
), user
, len
, 1);
1985 case IP6T_SO_GET_ENTRIES
:
1986 ret
= compat_get_entries(sock_net(sk
), user
, len
);
1989 ret
= do_ip6t_get_ctl(sk
, cmd
, user
, len
);
1996 do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
, unsigned int len
)
2000 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
2004 case IP6T_SO_SET_REPLACE
:
2005 ret
= do_replace(sock_net(sk
), user
, len
);
2008 case IP6T_SO_SET_ADD_COUNTERS
:
2009 ret
= do_add_counters(sock_net(sk
), user
, len
, 0);
2013 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
2021 do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
2025 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
2029 case IP6T_SO_GET_INFO
:
2030 ret
= get_info(sock_net(sk
), user
, len
, 0);
2033 case IP6T_SO_GET_ENTRIES
:
2034 ret
= get_entries(sock_net(sk
), user
, len
);
2037 case IP6T_SO_GET_REVISION_MATCH
:
2038 case IP6T_SO_GET_REVISION_TARGET
: {
2039 struct xt_get_revision rev
;
2042 if (*len
!= sizeof(rev
)) {
2046 if (copy_from_user(&rev
, user
, sizeof(rev
)) != 0) {
2050 rev
.name
[sizeof(rev
.name
)-1] = 0;
2052 if (cmd
== IP6T_SO_GET_REVISION_TARGET
)
2057 try_then_request_module(xt_find_revision(AF_INET6
, rev
.name
,
2060 "ip6t_%s", rev
.name
);
2065 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd
);
2072 struct xt_table
*ip6t_register_table(struct net
*net
,
2073 const struct xt_table
*table
,
2074 const struct ip6t_replace
*repl
)
2077 struct xt_table_info
*newinfo
;
2078 struct xt_table_info bootstrap
= {0};
2079 void *loc_cpu_entry
;
2080 struct xt_table
*new_table
;
2082 newinfo
= xt_alloc_table_info(repl
->size
);
2088 /* choose the copy on our node/cpu, but dont care about preemption */
2089 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
2090 memcpy(loc_cpu_entry
, repl
->entries
, repl
->size
);
2092 ret
= translate_table(net
, newinfo
, loc_cpu_entry
, repl
);
2096 new_table
= xt_register_table(net
, table
, &bootstrap
, newinfo
);
2097 if (IS_ERR(new_table
)) {
2098 ret
= PTR_ERR(new_table
);
2104 xt_free_table_info(newinfo
);
2106 return ERR_PTR(ret
);
2109 void ip6t_unregister_table(struct net
*net
, struct xt_table
*table
)
2111 struct xt_table_info
*private;
2112 void *loc_cpu_entry
;
2113 struct module
*table_owner
= table
->me
;
2114 struct ip6t_entry
*iter
;
2116 private = xt_unregister_table(table
);
2118 /* Decrease module usage counts and free resources */
2119 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
2120 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
)
2121 cleanup_entry(iter
, net
);
2122 if (private->number
> private->initial_entries
)
2123 module_put(table_owner
);
2124 xt_free_table_info(private);
/* Returns 1 if the type and code is matched by the range, 0 otherwise.
 * @invert flips the result (the "!" flag of the icmp6 match).
 */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	return (type == test_type && code >= min_code && code <= max_code)
		^ invert;
}
2138 icmp6_match(const struct sk_buff
*skb
, struct xt_action_param
*par
)
2140 const struct icmp6hdr
*ic
;
2141 struct icmp6hdr _icmph
;
2142 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2144 /* Must not be a fragment. */
2145 if (par
->fragoff
!= 0)
2148 ic
= skb_header_pointer(skb
, par
->thoff
, sizeof(_icmph
), &_icmph
);
2150 /* We've been asked to examine this packet, and we
2151 * can't. Hence, no choice but to drop.
2153 duprintf("Dropping evil ICMP tinygram.\n");
2154 par
->hotdrop
= true;
2158 return icmp6_type_code_match(icmpinfo
->type
,
2161 ic
->icmp6_type
, ic
->icmp6_code
,
2162 !!(icmpinfo
->invflags
&IP6T_ICMP_INV
));
2165 /* Called when user tries to insert an entry of this type. */
2166 static int icmp6_checkentry(const struct xt_mtchk_param
*par
)
2168 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2170 /* Must specify no unknown invflags */
2171 return (icmpinfo
->invflags
& ~IP6T_ICMP_INV
) ? -EINVAL
: 0;
2174 /* The built-in targets: standard (NULL) and error. */
2175 static struct xt_target ip6t_builtin_tg
[] __read_mostly
= {
2177 .name
= XT_STANDARD_TARGET
,
2178 .targetsize
= sizeof(int),
2179 .family
= NFPROTO_IPV6
,
2180 #ifdef CONFIG_COMPAT
2181 .compatsize
= sizeof(compat_int_t
),
2182 .compat_from_user
= compat_standard_from_user
,
2183 .compat_to_user
= compat_standard_to_user
,
2187 .name
= XT_ERROR_TARGET
,
2188 .target
= ip6t_error
,
2189 .targetsize
= XT_FUNCTION_MAXNAMELEN
,
2190 .family
= NFPROTO_IPV6
,
2194 static struct nf_sockopt_ops ip6t_sockopts
= {
2196 .set_optmin
= IP6T_BASE_CTL
,
2197 .set_optmax
= IP6T_SO_SET_MAX
+1,
2198 .set
= do_ip6t_set_ctl
,
2199 #ifdef CONFIG_COMPAT
2200 .compat_set
= compat_do_ip6t_set_ctl
,
2202 .get_optmin
= IP6T_BASE_CTL
,
2203 .get_optmax
= IP6T_SO_GET_MAX
+1,
2204 .get
= do_ip6t_get_ctl
,
2205 #ifdef CONFIG_COMPAT
2206 .compat_get
= compat_do_ip6t_get_ctl
,
2208 .owner
= THIS_MODULE
,
2211 static struct xt_match ip6t_builtin_mt
[] __read_mostly
= {
2214 .match
= icmp6_match
,
2215 .matchsize
= sizeof(struct ip6t_icmp
),
2216 .checkentry
= icmp6_checkentry
,
2217 .proto
= IPPROTO_ICMPV6
,
2218 .family
= NFPROTO_IPV6
,
2222 static int __net_init
ip6_tables_net_init(struct net
*net
)
2224 return xt_proto_init(net
, NFPROTO_IPV6
);
2227 static void __net_exit
ip6_tables_net_exit(struct net
*net
)
2229 xt_proto_fini(net
, NFPROTO_IPV6
);
2232 static struct pernet_operations ip6_tables_net_ops
= {
2233 .init
= ip6_tables_net_init
,
2234 .exit
= ip6_tables_net_exit
,
2237 static int __init
ip6_tables_init(void)
2241 ret
= register_pernet_subsys(&ip6_tables_net_ops
);
2245 /* No one else will be downing sem now, so we won't sleep */
2246 ret
= xt_register_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
2249 ret
= xt_register_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
2253 /* Register setsockopt */
2254 ret
= nf_register_sockopt(&ip6t_sockopts
);
2258 pr_info("(C) 2000-2006 Netfilter Core Team\n");
2262 xt_unregister_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
2264 xt_unregister_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
2266 unregister_pernet_subsys(&ip6_tables_net_ops
);
2271 static void __exit
ip6_tables_fini(void)
2273 nf_unregister_sockopt(&ip6t_sockopts
);
2275 xt_unregister_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
2276 xt_unregister_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
2277 unregister_pernet_subsys(&ip6_tables_net_ops
);
/* Exported API for table modules (ip6table_filter, ip6table_mangle, ...). */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);