2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/capability.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/poison.h>
21 #include <linux/icmpv6.h>
23 #include <net/compat.h>
24 #include <asm/uaccess.h>
25 #include <linux/mutex.h>
26 #include <linux/proc_fs.h>
27 #include <linux/err.h>
28 #include <linux/cpumask.h>
30 #include <linux/netfilter_ipv6/ip6_tables.h>
31 #include <linux/netfilter/x_tables.h>
32 #include <net/netfilter/nf_log.h>
33 #include "../../netfilter/xt_repldata.h"
35 MODULE_LICENSE("GPL");
36 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
37 MODULE_DESCRIPTION("IPv6 packet filter");
39 /*#define DEBUG_IP_FIREWALL*/
40 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
41 /*#define DEBUG_IP_FIREWALL_USER*/
43 #ifdef DEBUG_IP_FIREWALL
44 #define dprintf(format, args...) pr_info(format , ## args)
46 #define dprintf(format, args...)
49 #ifdef DEBUG_IP_FIREWALL_USER
50 #define duprintf(format, args...) pr_info(format , ## args)
52 #define duprintf(format, args...)
55 #ifdef CONFIG_NETFILTER_DEBUG
56 #define IP_NF_ASSERT(x) WARN_ON(!(x))
58 #define IP_NF_ASSERT(x)
62 /* All the better to debug you with... */
/* NOTE(review): extraction-garbled fragment — braces and some tokens were
 * lost when this file was captured; code bytes left untouched.
 * Allocates the initial replaceable table blob for an ip6tables table via
 * the generic xt_alloc_initial_table() helper; the "ip6t"/"IP6T" arguments
 * are macro-name prefixes pasted together by xt_repldata.h — TODO confirm
 * against the original tree. */
67 void *ip6t_alloc_initial_table(const struct xt_table
*info
)
69 return xt_alloc_initial_table(ip6t
, IP6T
);
71 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table
);
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
80 Hence the start of any table is given by get_table() below. */
82 /* Returns whether matches rule or not. */
83 /* Performance critical - called for every packet */
85 ip6_packet_match(const struct sk_buff
*skb
,
88 const struct ip6t_ip6
*ip6info
,
89 unsigned int *protoff
,
90 int *fragoff
, bool *hotdrop
)
93 const struct ipv6hdr
*ipv6
= ipv6_hdr(skb
);
95 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
97 if (FWINV(ipv6_masked_addr_cmp(&ipv6
->saddr
, &ip6info
->smsk
,
98 &ip6info
->src
), IP6T_INV_SRCIP
) ||
99 FWINV(ipv6_masked_addr_cmp(&ipv6
->daddr
, &ip6info
->dmsk
,
100 &ip6info
->dst
), IP6T_INV_DSTIP
)) {
101 dprintf("Source or dest mismatch.\n");
103 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
104 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
105 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
106 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
107 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
108 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
112 ret
= ifname_compare_aligned(indev
, ip6info
->iniface
, ip6info
->iniface_mask
);
114 if (FWINV(ret
!= 0, IP6T_INV_VIA_IN
)) {
115 dprintf("VIA in mismatch (%s vs %s).%s\n",
116 indev
, ip6info
->iniface
,
117 ip6info
->invflags
&IP6T_INV_VIA_IN
?" (INV)":"");
121 ret
= ifname_compare_aligned(outdev
, ip6info
->outiface
, ip6info
->outiface_mask
);
123 if (FWINV(ret
!= 0, IP6T_INV_VIA_OUT
)) {
124 dprintf("VIA out mismatch (%s vs %s).%s\n",
125 outdev
, ip6info
->outiface
,
126 ip6info
->invflags
&IP6T_INV_VIA_OUT
?" (INV)":"");
130 /* ... might want to do something with class and flowlabel here ... */
132 /* look for the desired protocol header */
133 if((ip6info
->flags
& IP6T_F_PROTO
)) {
135 unsigned short _frag_off
;
137 protohdr
= ipv6_find_hdr(skb
, protoff
, -1, &_frag_off
, NULL
);
143 *fragoff
= _frag_off
;
145 dprintf("Packet protocol %hi ?= %s%hi.\n",
147 ip6info
->invflags
& IP6T_INV_PROTO
? "!":"",
150 if (ip6info
->proto
== protohdr
) {
151 if(ip6info
->invflags
& IP6T_INV_PROTO
) {
157 /* We need match for the '-p all', too! */
158 if ((ip6info
->proto
!= 0) &&
159 !(ip6info
->invflags
& IP6T_INV_PROTO
))
165 /* should be ip6 safe */
167 ip6_checkentry(const struct ip6t_ip6
*ipv6
)
169 if (ipv6
->flags
& ~IP6T_F_MASK
) {
170 duprintf("Unknown flag bits set: %08X\n",
171 ipv6
->flags
& ~IP6T_F_MASK
);
174 if (ipv6
->invflags
& ~IP6T_INV_MASK
) {
175 duprintf("Unknown invflag bits set: %08X\n",
176 ipv6
->invflags
& ~IP6T_INV_MASK
);
/* NOTE(review): garbled fragment; code bytes left untouched.
 * Target handler for the built-in ERROR target: rate-limited log of the
 * chain/target name stored in par->targinfo. The function's return
 * statement (presumably "return NF_DROP;") is missing from this capture —
 * verify against the original tree. */
183 ip6t_error(struct sk_buff
*skb
, const struct xt_action_param
*par
)
185 net_info_ratelimited("error: `%s'\n", (const char *)par
->targinfo
);
/* NOTE(review): garbled fragment; code bytes left untouched.
 * Returns the ip6t_entry located at byte offset 'offset' from 'base'
 * (plain pointer arithmetic into the flat rule blob — no bounds check;
 * callers are expected to pass validated offsets). */
190 static inline struct ip6t_entry
*
191 get_entry(const void *base
, unsigned int offset
)
193 return (struct ip6t_entry
*)(base
+ offset
);
196 /* All zeroes == unconditional rule. */
197 /* Mildly perf critical (only if packet tracing is on) */
/* NOTE(review): garbled fragment; code bytes left untouched.
 * True when the rule's IPv6 match part is all zeroes, i.e. the rule
 * matches every packet. Compares against a static zero-initialized
 * ip6t_ip6 ('uncond') with memcmp. */
198 static inline bool unconditional(const struct ip6t_ip6
*ipv6
)
200 static const struct ip6t_ip6 uncond
;
202 return memcmp(ipv6
, &uncond
, sizeof(uncond
)) == 0;
/* NOTE(review): garbled fragment; code bytes left untouched.
 * Const-qualified accessor for an entry's target: casts away const to
 * reuse the non-const ip6t_get_target() helper, then returns the result
 * through a const pointer. The entry itself is never modified. */
205 static inline const struct xt_entry_target
*
206 ip6t_get_target_c(const struct ip6t_entry
*e
)
208 return ip6t_get_target((struct ip6t_entry
*)e
);
211 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
212 /* This cries for unification! */
213 static const char *const hooknames
[] = {
214 [NF_INET_PRE_ROUTING
] = "PREROUTING",
215 [NF_INET_LOCAL_IN
] = "INPUT",
216 [NF_INET_FORWARD
] = "FORWARD",
217 [NF_INET_LOCAL_OUT
] = "OUTPUT",
218 [NF_INET_POST_ROUTING
] = "POSTROUTING",
221 enum nf_ip_trace_comments
{
222 NF_IP6_TRACE_COMMENT_RULE
,
223 NF_IP6_TRACE_COMMENT_RETURN
,
224 NF_IP6_TRACE_COMMENT_POLICY
,
227 static const char *const comments
[] = {
228 [NF_IP6_TRACE_COMMENT_RULE
] = "rule",
229 [NF_IP6_TRACE_COMMENT_RETURN
] = "return",
230 [NF_IP6_TRACE_COMMENT_POLICY
] = "policy",
233 static struct nf_loginfo trace_loginfo
= {
234 .type
= NF_LOG_TYPE_LOG
,
238 .logflags
= NF_LOG_MASK
,
243 /* Mildly perf critical (only if packet tracing is on) */
245 get_chainname_rulenum(const struct ip6t_entry
*s
, const struct ip6t_entry
*e
,
246 const char *hookname
, const char **chainname
,
247 const char **comment
, unsigned int *rulenum
)
249 const struct xt_standard_target
*t
= (void *)ip6t_get_target_c(s
);
251 if (strcmp(t
->target
.u
.kernel
.target
->name
, XT_ERROR_TARGET
) == 0) {
252 /* Head of user chain: ERROR target with chainname */
253 *chainname
= t
->target
.data
;
258 if (s
->target_offset
== sizeof(struct ip6t_entry
) &&
259 strcmp(t
->target
.u
.kernel
.target
->name
,
260 XT_STANDARD_TARGET
) == 0 &&
262 unconditional(&s
->ipv6
)) {
263 /* Tail of chains: STANDARD target (return/policy) */
264 *comment
= *chainname
== hookname
265 ? comments
[NF_IP6_TRACE_COMMENT_POLICY
]
266 : comments
[NF_IP6_TRACE_COMMENT_RETURN
];
275 static void trace_packet(const struct sk_buff
*skb
,
277 const struct net_device
*in
,
278 const struct net_device
*out
,
279 const char *tablename
,
280 const struct xt_table_info
*private,
281 const struct ip6t_entry
*e
)
283 const void *table_base
;
284 const struct ip6t_entry
*root
;
285 const char *hookname
, *chainname
, *comment
;
286 const struct ip6t_entry
*iter
;
287 unsigned int rulenum
= 0;
288 struct net
*net
= dev_net(in
? in
: out
);
290 table_base
= private->entries
[smp_processor_id()];
291 root
= get_entry(table_base
, private->hook_entry
[hook
]);
293 hookname
= chainname
= hooknames
[hook
];
294 comment
= comments
[NF_IP6_TRACE_COMMENT_RULE
];
296 xt_entry_foreach(iter
, root
, private->size
- private->hook_entry
[hook
])
297 if (get_chainname_rulenum(iter
, e
, hookname
,
298 &chainname
, &comment
, &rulenum
) != 0)
301 nf_log_packet(net
, AF_INET6
, hook
, skb
, in
, out
, &trace_loginfo
,
302 "TRACE: %s:%s:%s:%u ",
303 tablename
, chainname
, comment
, rulenum
);
/* NOTE(review): garbled fragment; code bytes left untouched.
 * Advances to the next rule in the flat blob: the following entry starts
 * next_offset bytes after the current one. __pure: no side effects,
 * result depends only on the argument. */
307 static inline __pure
struct ip6t_entry
*
308 ip6t_next_entry(const struct ip6t_entry
*entry
)
310 return (void *)entry
+ entry
->next_offset
;
313 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
315 ip6t_do_table(struct sk_buff
*skb
,
317 const struct net_device
*in
,
318 const struct net_device
*out
,
319 struct xt_table
*table
)
321 static const char nulldevname
[IFNAMSIZ
] __attribute__((aligned(sizeof(long))));
322 /* Initializing verdict to NF_DROP keeps gcc happy. */
323 unsigned int verdict
= NF_DROP
;
324 const char *indev
, *outdev
;
325 const void *table_base
;
326 struct ip6t_entry
*e
, **jumpstack
;
327 unsigned int *stackptr
, origptr
, cpu
;
328 const struct xt_table_info
*private;
329 struct xt_action_param acpar
;
333 indev
= in
? in
->name
: nulldevname
;
334 outdev
= out
? out
->name
: nulldevname
;
335 /* We handle fragments by dealing with the first fragment as
336 * if it was a normal packet. All other fragments are treated
337 * normally, except that they will NEVER match rules that ask
338 * things we don't know, ie. tcp syn flag or ports). If the
339 * rule is also a fragment-specific rule, non-fragments won't
341 acpar
.hotdrop
= false;
344 acpar
.family
= NFPROTO_IPV6
;
345 acpar
.hooknum
= hook
;
347 IP_NF_ASSERT(table
->valid_hooks
& (1 << hook
));
350 addend
= xt_write_recseq_begin();
351 private = table
->private;
352 cpu
= smp_processor_id();
353 table_base
= private->entries
[cpu
];
354 jumpstack
= (struct ip6t_entry
**)private->jumpstack
[cpu
];
355 stackptr
= per_cpu_ptr(private->stackptr
, cpu
);
358 e
= get_entry(table_base
, private->hook_entry
[hook
]);
361 const struct xt_entry_target
*t
;
362 const struct xt_entry_match
*ematch
;
366 if (!ip6_packet_match(skb
, indev
, outdev
, &e
->ipv6
,
367 &acpar
.thoff
, &acpar
.fragoff
, &acpar
.hotdrop
)) {
369 e
= ip6t_next_entry(e
);
373 xt_ematch_foreach(ematch
, e
) {
374 acpar
.match
= ematch
->u
.kernel
.match
;
375 acpar
.matchinfo
= ematch
->data
;
376 if (!acpar
.match
->match(skb
, &acpar
))
380 ADD_COUNTER(e
->counters
, skb
->len
, 1);
382 t
= ip6t_get_target_c(e
);
383 IP_NF_ASSERT(t
->u
.kernel
.target
);
385 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
386 /* The packet is traced: log it */
387 if (unlikely(skb
->nf_trace
))
388 trace_packet(skb
, hook
, in
, out
,
389 table
->name
, private, e
);
391 /* Standard target? */
392 if (!t
->u
.kernel
.target
->target
) {
395 v
= ((struct xt_standard_target
*)t
)->verdict
;
397 /* Pop from stack? */
398 if (v
!= XT_RETURN
) {
399 verdict
= (unsigned int)(-v
) - 1;
402 if (*stackptr
<= origptr
)
403 e
= get_entry(table_base
,
404 private->underflow
[hook
]);
406 e
= ip6t_next_entry(jumpstack
[--*stackptr
]);
409 if (table_base
+ v
!= ip6t_next_entry(e
) &&
410 !(e
->ipv6
.flags
& IP6T_F_GOTO
)) {
411 if (*stackptr
>= private->stacksize
) {
415 jumpstack
[(*stackptr
)++] = e
;
418 e
= get_entry(table_base
, v
);
422 acpar
.target
= t
->u
.kernel
.target
;
423 acpar
.targinfo
= t
->data
;
425 verdict
= t
->u
.kernel
.target
->target(skb
, &acpar
);
426 if (verdict
== XT_CONTINUE
)
427 e
= ip6t_next_entry(e
);
431 } while (!acpar
.hotdrop
);
435 xt_write_recseq_end(addend
);
438 #ifdef DEBUG_ALLOW_ALL
447 /* Figures out from what hook each rule can be called: returns 0 if
448 there are loops. Puts hook bitmask in comefrom. */
450 mark_source_chains(const struct xt_table_info
*newinfo
,
451 unsigned int valid_hooks
, void *entry0
)
455 /* No recursion; use packet counter to save back ptrs (reset
456 to 0 as we leave), and comefrom to save source hook bitmask */
457 for (hook
= 0; hook
< NF_INET_NUMHOOKS
; hook
++) {
458 unsigned int pos
= newinfo
->hook_entry
[hook
];
459 struct ip6t_entry
*e
= (struct ip6t_entry
*)(entry0
+ pos
);
461 if (!(valid_hooks
& (1 << hook
)))
464 /* Set initial back pointer. */
465 e
->counters
.pcnt
= pos
;
468 const struct xt_standard_target
*t
469 = (void *)ip6t_get_target_c(e
);
470 int visited
= e
->comefrom
& (1 << hook
);
472 if (e
->comefrom
& (1 << NF_INET_NUMHOOKS
)) {
473 pr_err("iptables: loop hook %u pos %u %08X.\n",
474 hook
, pos
, e
->comefrom
);
477 e
->comefrom
|= ((1 << hook
) | (1 << NF_INET_NUMHOOKS
));
479 /* Unconditional return/END. */
480 if ((e
->target_offset
== sizeof(struct ip6t_entry
) &&
481 (strcmp(t
->target
.u
.user
.name
,
482 XT_STANDARD_TARGET
) == 0) &&
484 unconditional(&e
->ipv6
)) || visited
) {
485 unsigned int oldpos
, size
;
487 if ((strcmp(t
->target
.u
.user
.name
,
488 XT_STANDARD_TARGET
) == 0) &&
489 t
->verdict
< -NF_MAX_VERDICT
- 1) {
490 duprintf("mark_source_chains: bad "
491 "negative verdict (%i)\n",
496 /* Return: backtrack through the last
499 e
->comefrom
^= (1<<NF_INET_NUMHOOKS
);
500 #ifdef DEBUG_IP_FIREWALL_USER
502 & (1 << NF_INET_NUMHOOKS
)) {
503 duprintf("Back unset "
510 pos
= e
->counters
.pcnt
;
511 e
->counters
.pcnt
= 0;
513 /* We're at the start. */
517 e
= (struct ip6t_entry
*)
519 } while (oldpos
== pos
+ e
->next_offset
);
522 size
= e
->next_offset
;
523 e
= (struct ip6t_entry
*)
524 (entry0
+ pos
+ size
);
525 e
->counters
.pcnt
= pos
;
528 int newpos
= t
->verdict
;
530 if (strcmp(t
->target
.u
.user
.name
,
531 XT_STANDARD_TARGET
) == 0 &&
533 if (newpos
> newinfo
->size
-
534 sizeof(struct ip6t_entry
)) {
535 duprintf("mark_source_chains: "
536 "bad verdict (%i)\n",
540 /* This a jump; chase it. */
541 duprintf("Jump rule %u -> %u\n",
544 /* ... this is a fallthru */
545 newpos
= pos
+ e
->next_offset
;
547 e
= (struct ip6t_entry
*)
549 e
->counters
.pcnt
= pos
;
554 duprintf("Finished chain %u\n", hook
);
/* NOTE(review): garbled fragment; code bytes left untouched.
 * Destroys one match extension of a rule: fills an xt_mtdtor_param with
 * the kernel match object and its per-rule data, calls the extension's
 * optional ->destroy() hook, then drops the module reference taken when
 * the match was looked up. The assignment of par.net (from the 'net'
 * parameter) appears to be missing from this capture — confirm against
 * the original tree. */
559 static void cleanup_match(struct xt_entry_match
*m
, struct net
*net
)
561 struct xt_mtdtor_param par
;
564 par
.match
= m
->u
.kernel
.match
;
565 par
.matchinfo
= m
->data
;
566 par
.family
= NFPROTO_IPV6
;
567 if (par
.match
->destroy
!= NULL
)
568 par
.match
->destroy(&par
);
569 module_put(par
.match
->me
);
573 check_entry(const struct ip6t_entry
*e
)
575 const struct xt_entry_target
*t
;
577 if (!ip6_checkentry(&e
->ipv6
))
580 if (e
->target_offset
+ sizeof(struct xt_entry_target
) >
584 t
= ip6t_get_target_c(e
);
585 if (e
->target_offset
+ t
->u
.target_size
> e
->next_offset
)
591 static int check_match(struct xt_entry_match
*m
, struct xt_mtchk_param
*par
)
593 const struct ip6t_ip6
*ipv6
= par
->entryinfo
;
596 par
->match
= m
->u
.kernel
.match
;
597 par
->matchinfo
= m
->data
;
599 ret
= xt_check_match(par
, m
->u
.match_size
- sizeof(*m
),
600 ipv6
->proto
, ipv6
->invflags
& IP6T_INV_PROTO
);
602 duprintf("ip_tables: check failed for `%s'.\n",
610 find_check_match(struct xt_entry_match
*m
, struct xt_mtchk_param
*par
)
612 struct xt_match
*match
;
615 match
= xt_request_find_match(NFPROTO_IPV6
, m
->u
.user
.name
,
618 duprintf("find_check_match: `%s' not found\n", m
->u
.user
.name
);
619 return PTR_ERR(match
);
621 m
->u
.kernel
.match
= match
;
623 ret
= check_match(m
, par
);
629 module_put(m
->u
.kernel
.match
->me
);
633 static int check_target(struct ip6t_entry
*e
, struct net
*net
, const char *name
)
635 struct xt_entry_target
*t
= ip6t_get_target(e
);
636 struct xt_tgchk_param par
= {
640 .target
= t
->u
.kernel
.target
,
642 .hook_mask
= e
->comefrom
,
643 .family
= NFPROTO_IPV6
,
647 t
= ip6t_get_target(e
);
648 ret
= xt_check_target(&par
, t
->u
.target_size
- sizeof(*t
),
649 e
->ipv6
.proto
, e
->ipv6
.invflags
& IP6T_INV_PROTO
);
651 duprintf("ip_tables: check failed for `%s'.\n",
652 t
->u
.kernel
.target
->name
);
659 find_check_entry(struct ip6t_entry
*e
, struct net
*net
, const char *name
,
662 struct xt_entry_target
*t
;
663 struct xt_target
*target
;
666 struct xt_mtchk_param mtpar
;
667 struct xt_entry_match
*ematch
;
672 mtpar
.entryinfo
= &e
->ipv6
;
673 mtpar
.hook_mask
= e
->comefrom
;
674 mtpar
.family
= NFPROTO_IPV6
;
675 xt_ematch_foreach(ematch
, e
) {
676 ret
= find_check_match(ematch
, &mtpar
);
678 goto cleanup_matches
;
682 t
= ip6t_get_target(e
);
683 target
= xt_request_find_target(NFPROTO_IPV6
, t
->u
.user
.name
,
685 if (IS_ERR(target
)) {
686 duprintf("find_check_entry: `%s' not found\n", t
->u
.user
.name
);
687 ret
= PTR_ERR(target
);
688 goto cleanup_matches
;
690 t
->u
.kernel
.target
= target
;
692 ret
= check_target(e
, net
, name
);
697 module_put(t
->u
.kernel
.target
->me
);
699 xt_ematch_foreach(ematch
, e
) {
702 cleanup_match(ematch
, net
);
/* NOTE(review): garbled fragment; code bytes left untouched.
 * Validates a chain underflow (base policy) rule: it must match
 * unconditionally, use the STANDARD target, and carry an absolute
 * NF_DROP or NF_ACCEPT verdict (negative verdicts encode "-verdict - 1",
 * hence the decode below). The early "return false;" bodies of the two
 * guard conditions are missing from this capture — verify against the
 * original tree. */
707 static bool check_underflow(const struct ip6t_entry
*e
)
709 const struct xt_entry_target
*t
;
710 unsigned int verdict
;
712 if (!unconditional(&e
->ipv6
))
714 t
= ip6t_get_target_c(e
);
715 if (strcmp(t
->u
.user
.name
, XT_STANDARD_TARGET
) != 0)
717 verdict
= ((struct xt_standard_target
*)t
)->verdict
;
718 verdict
= -verdict
- 1;
719 return verdict
== NF_DROP
|| verdict
== NF_ACCEPT
;
723 check_entry_size_and_hooks(struct ip6t_entry
*e
,
724 struct xt_table_info
*newinfo
,
725 const unsigned char *base
,
726 const unsigned char *limit
,
727 const unsigned int *hook_entries
,
728 const unsigned int *underflows
,
729 unsigned int valid_hooks
)
734 if ((unsigned long)e
% __alignof__(struct ip6t_entry
) != 0 ||
735 (unsigned char *)e
+ sizeof(struct ip6t_entry
) >= limit
) {
736 duprintf("Bad offset %p\n", e
);
741 < sizeof(struct ip6t_entry
) + sizeof(struct xt_entry_target
)) {
742 duprintf("checking: element %p size %u\n",
747 err
= check_entry(e
);
751 /* Check hooks & underflows */
752 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
753 if (!(valid_hooks
& (1 << h
)))
755 if ((unsigned char *)e
- base
== hook_entries
[h
])
756 newinfo
->hook_entry
[h
] = hook_entries
[h
];
757 if ((unsigned char *)e
- base
== underflows
[h
]) {
758 if (!check_underflow(e
)) {
759 pr_err("Underflows must be unconditional and "
760 "use the STANDARD target with "
764 newinfo
->underflow
[h
] = underflows
[h
];
768 /* Clear counters and comefrom */
769 e
->counters
= ((struct xt_counters
) { 0, 0 });
774 static void cleanup_entry(struct ip6t_entry
*e
, struct net
*net
)
776 struct xt_tgdtor_param par
;
777 struct xt_entry_target
*t
;
778 struct xt_entry_match
*ematch
;
780 /* Cleanup all matches */
781 xt_ematch_foreach(ematch
, e
)
782 cleanup_match(ematch
, net
);
783 t
= ip6t_get_target(e
);
786 par
.target
= t
->u
.kernel
.target
;
787 par
.targinfo
= t
->data
;
788 par
.family
= NFPROTO_IPV6
;
789 if (par
.target
->destroy
!= NULL
)
790 par
.target
->destroy(&par
);
791 module_put(par
.target
->me
);
794 /* Checks and translates the user-supplied table segment (held in
797 translate_table(struct net
*net
, struct xt_table_info
*newinfo
, void *entry0
,
798 const struct ip6t_replace
*repl
)
800 struct ip6t_entry
*iter
;
804 newinfo
->size
= repl
->size
;
805 newinfo
->number
= repl
->num_entries
;
807 /* Init all hooks to impossible value. */
808 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
809 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
810 newinfo
->underflow
[i
] = 0xFFFFFFFF;
813 duprintf("translate_table: size %u\n", newinfo
->size
);
815 /* Walk through entries, checking offsets. */
816 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
817 ret
= check_entry_size_and_hooks(iter
, newinfo
, entry0
,
825 if (strcmp(ip6t_get_target(iter
)->u
.user
.name
,
826 XT_ERROR_TARGET
) == 0)
827 ++newinfo
->stacksize
;
830 if (i
!= repl
->num_entries
) {
831 duprintf("translate_table: %u not %u entries\n",
832 i
, repl
->num_entries
);
836 /* Check hooks all assigned */
837 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
838 /* Only hooks which are valid */
839 if (!(repl
->valid_hooks
& (1 << i
)))
841 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
842 duprintf("Invalid hook entry %u %u\n",
843 i
, repl
->hook_entry
[i
]);
846 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
847 duprintf("Invalid underflow %u %u\n",
848 i
, repl
->underflow
[i
]);
853 if (!mark_source_chains(newinfo
, repl
->valid_hooks
, entry0
))
856 /* Finally, each sanity check must pass */
858 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
859 ret
= find_check_entry(iter
, net
, repl
->name
, repl
->size
);
866 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
869 cleanup_entry(iter
, net
);
874 /* And one copy for every other CPU */
875 for_each_possible_cpu(i
) {
876 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry0
)
877 memcpy(newinfo
->entries
[i
], entry0
, newinfo
->size
);
884 get_counters(const struct xt_table_info
*t
,
885 struct xt_counters counters
[])
887 struct ip6t_entry
*iter
;
891 for_each_possible_cpu(cpu
) {
892 seqcount_t
*s
= &per_cpu(xt_recseq
, cpu
);
895 xt_entry_foreach(iter
, t
->entries
[cpu
], t
->size
) {
900 start
= read_seqcount_begin(s
);
901 bcnt
= iter
->counters
.bcnt
;
902 pcnt
= iter
->counters
.pcnt
;
903 } while (read_seqcount_retry(s
, start
));
905 ADD_COUNTER(counters
[i
], bcnt
, pcnt
);
911 static struct xt_counters
*alloc_counters(const struct xt_table
*table
)
913 unsigned int countersize
;
914 struct xt_counters
*counters
;
915 const struct xt_table_info
*private = table
->private;
917 /* We need atomic snapshot of counters: rest doesn't change
918 (other than comefrom, which userspace doesn't care
920 countersize
= sizeof(struct xt_counters
) * private->number
;
921 counters
= vzalloc(countersize
);
923 if (counters
== NULL
)
924 return ERR_PTR(-ENOMEM
);
926 get_counters(private, counters
);
932 copy_entries_to_user(unsigned int total_size
,
933 const struct xt_table
*table
,
934 void __user
*userptr
)
936 unsigned int off
, num
;
937 const struct ip6t_entry
*e
;
938 struct xt_counters
*counters
;
939 const struct xt_table_info
*private = table
->private;
941 const void *loc_cpu_entry
;
943 counters
= alloc_counters(table
);
944 if (IS_ERR(counters
))
945 return PTR_ERR(counters
);
947 /* choose the copy that is on our node/cpu, ...
948 * This choice is lazy (because current thread is
949 * allowed to migrate to another cpu)
951 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
952 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
957 /* FIXME: use iterator macros --RR */
958 /* ... then go back and fix counters and names */
959 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
961 const struct xt_entry_match
*m
;
962 const struct xt_entry_target
*t
;
964 e
= (struct ip6t_entry
*)(loc_cpu_entry
+ off
);
965 if (copy_to_user(userptr
+ off
966 + offsetof(struct ip6t_entry
, counters
),
968 sizeof(counters
[num
])) != 0) {
973 for (i
= sizeof(struct ip6t_entry
);
974 i
< e
->target_offset
;
975 i
+= m
->u
.match_size
) {
978 if (copy_to_user(userptr
+ off
+ i
979 + offsetof(struct xt_entry_match
,
981 m
->u
.kernel
.match
->name
,
982 strlen(m
->u
.kernel
.match
->name
)+1)
989 t
= ip6t_get_target_c(e
);
990 if (copy_to_user(userptr
+ off
+ e
->target_offset
991 + offsetof(struct xt_entry_target
,
993 t
->u
.kernel
.target
->name
,
994 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
1005 #ifdef CONFIG_COMPAT
/* NOTE(review): garbled fragment; code bytes left untouched.
 * 32-bit-compat translation of a STANDARD target verdict coming from
 * userspace: positive verdicts are jump offsets into the blob, so they
 * are adjusted by the native-vs-compat layout delta computed by
 * xt_compat_calc_jump(), then written to 'dst' via memcpy. */
1006 static void compat_standard_from_user(void *dst
, const void *src
)
1008 int v
= *(compat_int_t
*)src
;
1011 v
+= xt_compat_calc_jump(AF_INET6
, v
);
1012 memcpy(dst
, &v
, sizeof(v
));
/* NOTE(review): garbled fragment; code bytes left untouched.
 * Inverse of compat_standard_from_user(): converts a native STANDARD
 * target verdict to its compat encoding by subtracting the layout delta,
 * then copies it to the userspace pointer. Returns -EFAULT on
 * copy_to_user failure, 0 on success. */
1015 static int compat_standard_to_user(void __user
*dst
, const void *src
)
1017 compat_int_t cv
= *(int *)src
;
1020 cv
-= xt_compat_calc_jump(AF_INET6
, cv
);
1021 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
1024 static int compat_calc_entry(const struct ip6t_entry
*e
,
1025 const struct xt_table_info
*info
,
1026 const void *base
, struct xt_table_info
*newinfo
)
1028 const struct xt_entry_match
*ematch
;
1029 const struct xt_entry_target
*t
;
1030 unsigned int entry_offset
;
1033 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1034 entry_offset
= (void *)e
- base
;
1035 xt_ematch_foreach(ematch
, e
)
1036 off
+= xt_compat_match_offset(ematch
->u
.kernel
.match
);
1037 t
= ip6t_get_target_c(e
);
1038 off
+= xt_compat_target_offset(t
->u
.kernel
.target
);
1039 newinfo
->size
-= off
;
1040 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1044 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1045 if (info
->hook_entry
[i
] &&
1046 (e
< (struct ip6t_entry
*)(base
+ info
->hook_entry
[i
])))
1047 newinfo
->hook_entry
[i
] -= off
;
1048 if (info
->underflow
[i
] &&
1049 (e
< (struct ip6t_entry
*)(base
+ info
->underflow
[i
])))
1050 newinfo
->underflow
[i
] -= off
;
1055 static int compat_table_info(const struct xt_table_info
*info
,
1056 struct xt_table_info
*newinfo
)
1058 struct ip6t_entry
*iter
;
1059 void *loc_cpu_entry
;
1062 if (!newinfo
|| !info
)
1065 /* we dont care about newinfo->entries[] */
1066 memcpy(newinfo
, info
, offsetof(struct xt_table_info
, entries
));
1067 newinfo
->initial_entries
= 0;
1068 loc_cpu_entry
= info
->entries
[raw_smp_processor_id()];
1069 xt_compat_init_offsets(AF_INET6
, info
->number
);
1070 xt_entry_foreach(iter
, loc_cpu_entry
, info
->size
) {
1071 ret
= compat_calc_entry(iter
, info
, loc_cpu_entry
, newinfo
);
1079 static int get_info(struct net
*net
, void __user
*user
,
1080 const int *len
, int compat
)
1082 char name
[XT_TABLE_MAXNAMELEN
];
1086 if (*len
!= sizeof(struct ip6t_getinfo
)) {
1087 duprintf("length %u != %zu\n", *len
,
1088 sizeof(struct ip6t_getinfo
));
1092 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
1095 name
[XT_TABLE_MAXNAMELEN
-1] = '\0';
1096 #ifdef CONFIG_COMPAT
1098 xt_compat_lock(AF_INET6
);
1100 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1101 "ip6table_%s", name
);
1102 if (!IS_ERR_OR_NULL(t
)) {
1103 struct ip6t_getinfo info
;
1104 const struct xt_table_info
*private = t
->private;
1105 #ifdef CONFIG_COMPAT
1106 struct xt_table_info tmp
;
1109 ret
= compat_table_info(private, &tmp
);
1110 xt_compat_flush_offsets(AF_INET6
);
1114 memset(&info
, 0, sizeof(info
));
1115 info
.valid_hooks
= t
->valid_hooks
;
1116 memcpy(info
.hook_entry
, private->hook_entry
,
1117 sizeof(info
.hook_entry
));
1118 memcpy(info
.underflow
, private->underflow
,
1119 sizeof(info
.underflow
));
1120 info
.num_entries
= private->number
;
1121 info
.size
= private->size
;
1122 strcpy(info
.name
, name
);
1124 if (copy_to_user(user
, &info
, *len
) != 0)
1132 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1133 #ifdef CONFIG_COMPAT
1135 xt_compat_unlock(AF_INET6
);
1141 get_entries(struct net
*net
, struct ip6t_get_entries __user
*uptr
,
1145 struct ip6t_get_entries get
;
1148 if (*len
< sizeof(get
)) {
1149 duprintf("get_entries: %u < %zu\n", *len
, sizeof(get
));
1152 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1154 if (*len
!= sizeof(struct ip6t_get_entries
) + get
.size
) {
1155 duprintf("get_entries: %u != %zu\n",
1156 *len
, sizeof(get
) + get
.size
);
1160 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1161 if (!IS_ERR_OR_NULL(t
)) {
1162 struct xt_table_info
*private = t
->private;
1163 duprintf("t->private->number = %u\n", private->number
);
1164 if (get
.size
== private->size
)
1165 ret
= copy_entries_to_user(private->size
,
1166 t
, uptr
->entrytable
);
1168 duprintf("get_entries: I've got %u not %u!\n",
1169 private->size
, get
.size
);
1175 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1181 __do_replace(struct net
*net
, const char *name
, unsigned int valid_hooks
,
1182 struct xt_table_info
*newinfo
, unsigned int num_counters
,
1183 void __user
*counters_ptr
)
1187 struct xt_table_info
*oldinfo
;
1188 struct xt_counters
*counters
;
1189 const void *loc_cpu_old_entry
;
1190 struct ip6t_entry
*iter
;
1193 counters
= vzalloc(num_counters
* sizeof(struct xt_counters
));
1199 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1200 "ip6table_%s", name
);
1201 if (IS_ERR_OR_NULL(t
)) {
1202 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1203 goto free_newinfo_counters_untrans
;
1207 if (valid_hooks
!= t
->valid_hooks
) {
1208 duprintf("Valid hook crap: %08X vs %08X\n",
1209 valid_hooks
, t
->valid_hooks
);
1214 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
1218 /* Update module usage count based on number of rules */
1219 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1220 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
1221 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
1222 (newinfo
->number
<= oldinfo
->initial_entries
))
1224 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
1225 (newinfo
->number
<= oldinfo
->initial_entries
))
1228 /* Get the old counters, and synchronize with replace */
1229 get_counters(oldinfo
, counters
);
1231 /* Decrease module usage counts and free resource */
1232 loc_cpu_old_entry
= oldinfo
->entries
[raw_smp_processor_id()];
1233 xt_entry_foreach(iter
, loc_cpu_old_entry
, oldinfo
->size
)
1234 cleanup_entry(iter
, net
);
1236 xt_free_table_info(oldinfo
);
1237 if (copy_to_user(counters_ptr
, counters
,
1238 sizeof(struct xt_counters
) * num_counters
) != 0) {
1239 /* Silent error, can't fail, new table is already in place */
1240 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1249 free_newinfo_counters_untrans
:
1256 do_replace(struct net
*net
, const void __user
*user
, unsigned int len
)
1259 struct ip6t_replace tmp
;
1260 struct xt_table_info
*newinfo
;
1261 void *loc_cpu_entry
;
1262 struct ip6t_entry
*iter
;
1264 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1267 /* overflow check */
1268 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1270 tmp
.name
[sizeof(tmp
.name
)-1] = 0;
1272 newinfo
= xt_alloc_table_info(tmp
.size
);
1276 /* choose the copy that is on our node/cpu */
1277 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1278 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1284 ret
= translate_table(net
, newinfo
, loc_cpu_entry
, &tmp
);
1288 duprintf("ip_tables: Translated table\n");
1290 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1291 tmp
.num_counters
, tmp
.counters
);
1293 goto free_newinfo_untrans
;
1296 free_newinfo_untrans
:
1297 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1298 cleanup_entry(iter
, net
);
1300 xt_free_table_info(newinfo
);
1305 do_add_counters(struct net
*net
, const void __user
*user
, unsigned int len
,
1308 unsigned int i
, curcpu
;
1309 struct xt_counters_info tmp
;
1310 struct xt_counters
*paddc
;
1311 unsigned int num_counters
;
1316 const struct xt_table_info
*private;
1318 const void *loc_cpu_entry
;
1319 struct ip6t_entry
*iter
;
1320 unsigned int addend
;
1321 #ifdef CONFIG_COMPAT
1322 struct compat_xt_counters_info compat_tmp
;
1326 size
= sizeof(struct compat_xt_counters_info
);
1331 size
= sizeof(struct xt_counters_info
);
1334 if (copy_from_user(ptmp
, user
, size
) != 0)
1337 #ifdef CONFIG_COMPAT
1339 num_counters
= compat_tmp
.num_counters
;
1340 name
= compat_tmp
.name
;
1344 num_counters
= tmp
.num_counters
;
1348 if (len
!= size
+ num_counters
* sizeof(struct xt_counters
))
1351 paddc
= vmalloc(len
- size
);
1355 if (copy_from_user(paddc
, user
+ size
, len
- size
) != 0) {
1360 t
= xt_find_table_lock(net
, AF_INET6
, name
);
1361 if (IS_ERR_OR_NULL(t
)) {
1362 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1368 private = t
->private;
1369 if (private->number
!= num_counters
) {
1371 goto unlock_up_free
;
1375 /* Choose the copy that is on our node */
1376 curcpu
= smp_processor_id();
1377 addend
= xt_write_recseq_begin();
1378 loc_cpu_entry
= private->entries
[curcpu
];
1379 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
) {
1380 ADD_COUNTER(iter
->counters
, paddc
[i
].bcnt
, paddc
[i
].pcnt
);
1383 xt_write_recseq_end(addend
);
1395 #ifdef CONFIG_COMPAT
1396 struct compat_ip6t_replace
{
1397 char name
[XT_TABLE_MAXNAMELEN
];
1401 u32 hook_entry
[NF_INET_NUMHOOKS
];
1402 u32 underflow
[NF_INET_NUMHOOKS
];
1404 compat_uptr_t counters
; /* struct xt_counters * */
1405 struct compat_ip6t_entry entries
[0];
/*
 * Copy one kernel ip6t_entry out to a 32-bit userland buffer, shrinking
 * it to the compat layout.  *dstptr advances through the user buffer and
 * *size tracks the cumulative native-vs-compat size delta so target_offset
 * and next_offset can be rewritten for the compat layout.
 * Returns 0 or a negative errno.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* Header first; counters come from the freshly allocated snapshot,
	 * not from the (zeroed) per-entry counters. */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
	    sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	/* Each match/target shrinks by its own compat offset. */
	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	/* Fix up the offsets in the already-copied compat header. */
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
/*
 * Resolve the xt_match named in a compat entry (loading the module if
 * needed), stash it in m->u.kernel.match, and add the match's
 * native-vs-compat size delta to *size.  Returns 0 or a negative errno.
 */
static int
compat_find_calc_match(struct xt_entry_match *m,
		       const char *name,
		       const struct ip6t_ip6 *ipv6,
		       unsigned int hookmask,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return PTR_ERR(match);
	}
	/* Caller owns the module reference taken by xt_request_find_match. */
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}
1468 static void compat_release_entry(struct compat_ip6t_entry
*e
)
1470 struct xt_entry_target
*t
;
1471 struct xt_entry_match
*ematch
;
1473 /* Cleanup all matches */
1474 xt_ematch_foreach(ematch
, e
)
1475 module_put(ematch
->u
.kernel
.match
->me
);
1476 t
= compat_ip6t_get_target(e
);
1477 module_put(t
->u
.kernel
.target
->me
);
1481 check_compat_entry_size_and_hooks(struct compat_ip6t_entry
*e
,
1482 struct xt_table_info
*newinfo
,
1484 const unsigned char *base
,
1485 const unsigned char *limit
,
1486 const unsigned int *hook_entries
,
1487 const unsigned int *underflows
,
1490 struct xt_entry_match
*ematch
;
1491 struct xt_entry_target
*t
;
1492 struct xt_target
*target
;
1493 unsigned int entry_offset
;
1497 duprintf("check_compat_entry_size_and_hooks %p\n", e
);
1498 if ((unsigned long)e
% __alignof__(struct compat_ip6t_entry
) != 0 ||
1499 (unsigned char *)e
+ sizeof(struct compat_ip6t_entry
) >= limit
) {
1500 duprintf("Bad offset %p, limit = %p\n", e
, limit
);
1504 if (e
->next_offset
< sizeof(struct compat_ip6t_entry
) +
1505 sizeof(struct compat_xt_entry_target
)) {
1506 duprintf("checking: element %p size %u\n",
1511 /* For purposes of check_entry casting the compat entry is fine */
1512 ret
= check_entry((struct ip6t_entry
*)e
);
1516 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1517 entry_offset
= (void *)e
- (void *)base
;
1519 xt_ematch_foreach(ematch
, e
) {
1520 ret
= compat_find_calc_match(ematch
, name
,
1521 &e
->ipv6
, e
->comefrom
, &off
);
1523 goto release_matches
;
1527 t
= compat_ip6t_get_target(e
);
1528 target
= xt_request_find_target(NFPROTO_IPV6
, t
->u
.user
.name
,
1529 t
->u
.user
.revision
);
1530 if (IS_ERR(target
)) {
1531 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1533 ret
= PTR_ERR(target
);
1534 goto release_matches
;
1536 t
->u
.kernel
.target
= target
;
1538 off
+= xt_compat_target_offset(target
);
1540 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1544 /* Check hooks & underflows */
1545 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1546 if ((unsigned char *)e
- base
== hook_entries
[h
])
1547 newinfo
->hook_entry
[h
] = hook_entries
[h
];
1548 if ((unsigned char *)e
- base
== underflows
[h
])
1549 newinfo
->underflow
[h
] = underflows
[h
];
1552 /* Clear counters and comefrom */
1553 memset(&e
->counters
, 0, sizeof(e
->counters
));
1558 module_put(t
->u
.kernel
.target
->me
);
1560 xt_ematch_foreach(ematch
, e
) {
1563 module_put(ematch
->u
.kernel
.match
->me
);
/*
 * Expand one validated compat entry into its native layout at *dstptr.
 * *size grows by the native-vs-compat delta; target_offset/next_offset
 * are rewritten accordingly, and any hook entry/underflow offsets that
 * lie beyond this entry are shifted by the same delta.
 * Runs after check_compat_entry_size_and_hooks(), so it cannot fail on
 * the target (xt_compat_target_from_user() return is void here).
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	/* Header and counters copy verbatim; matches/targets are expanded. */
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Entries after this one have grown; shift later hook offsets. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
/*
 * Run the ->checkentry hooks for a translated (now native-layout) entry:
 * every match, then the target.  On failure, clean up exactly the j
 * matches whose check already succeeded.  Returns 0 or a negative errno.
 */
static int compat_check_entry(struct ip6t_entry *e, struct net *net,
			      const char *name)
{
	unsigned int j;
	int ret = 0;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;	/* set by mark_source_chains() */
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	/* Undo only the matches that passed check_match(). */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
/*
 * Translate a 32-bit userland rule blob into a native xt_table_info.
 * Pass 1 (under xt_compat_lock): size/hook validation and match/target
 * resolution per entry.  Pass 2: expand each entry into a freshly
 * allocated native table, then mark chains and run the checkentry hooks.
 * On success *pinfo/*pentry0 are replaced (old info freed); on failure
 * the partial-progress counters i/j drive an exact unwind.
 * NOTE(review): reformatted from a garbled extraction; dropped lines
 * restored from upstream ip6_tables.c — verify against the tree in use.
 */
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET6);
	xt_compat_init_offsets(AF_INET6, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;	/* j = entries whose matches/target hold module refs */
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;	/* i = entries whose ->check hooks have run */
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ip6t_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	/* Release module refs of the first j compat entries. */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1801 compat_do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1804 struct compat_ip6t_replace tmp
;
1805 struct xt_table_info
*newinfo
;
1806 void *loc_cpu_entry
;
1807 struct ip6t_entry
*iter
;
1809 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1812 /* overflow check */
1813 if (tmp
.size
>= INT_MAX
/ num_possible_cpus())
1815 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1817 tmp
.name
[sizeof(tmp
.name
)-1] = 0;
1819 newinfo
= xt_alloc_table_info(tmp
.size
);
1823 /* choose the copy that is on our node/cpu */
1824 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1825 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1831 ret
= translate_compat_table(net
, tmp
.name
, tmp
.valid_hooks
,
1832 &newinfo
, &loc_cpu_entry
, tmp
.size
,
1833 tmp
.num_entries
, tmp
.hook_entry
,
1838 duprintf("compat_do_replace: Translated table\n");
1840 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1841 tmp
.num_counters
, compat_ptr(tmp
.counters
));
1843 goto free_newinfo_untrans
;
1846 free_newinfo_untrans
:
1847 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1848 cleanup_entry(iter
, net
);
1850 xt_free_table_info(newinfo
);
/*
 * Compat setsockopt dispatcher.  Requires CAP_NET_ADMIN in the socket's
 * user namespace; routes REPLACE to compat_do_replace() and ADD_COUNTERS
 * to do_add_counters() with compat=1.
 */
static int
compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
		       unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IP6T_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
/* 32-bit userland layout of the GET_ENTRIES request/reply. */
struct compat_ip6t_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	/* Rule blob copied back to userland. */
	struct compat_ip6t_entry entrytable[0];
};
/*
 * Copy a whole table out to 32-bit userland: snapshot the counters,
 * then stream each entry through compat_copy_entry_to_user().
 * Returns 0 or a negative errno.
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ip6t_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
/*
 * Compat handler for IP6T_SO_GET_ENTRIES: validate the request length
 * against the table's compat size, then dump the entries.
 * NOTE(review): get.name is not NUL-terminated here before the lookup —
 * upstream later added a terminator; verify against the tree in use.
 */
static int
compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	/* Userland must know the exact compat size in advance. */
	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET6);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			/* Size changed since the caller asked; retry. */
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET6);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET6);
	return ret;
}
1968 static int do_ip6t_get_ctl(struct sock
*, int, void __user
*, int *);
/*
 * Compat getsockopt dispatcher.  INFO and ENTRIES need compat-aware
 * handling; everything else falls through to the native handler.
 */
static int
compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);	/* compat=1 */
		break;
	case IP6T_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		/* Revision queries etc. are layout-identical; reuse native. */
		ret = do_ip6t_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
/*
 * Native setsockopt dispatcher.  Requires CAP_NET_ADMIN in the socket's
 * user namespace; routes REPLACE to do_replace() and ADD_COUNTERS to
 * do_add_counters() with compat=0.
 */
static int
do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IP6T_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
/*
 * Native getsockopt dispatcher: table info, entry dump, and match/target
 * revision queries (which may autoload an ip6t_<name> module).
 */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* Guard against an unterminated name from userland. */
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* Autoload the extension module if the revision is unknown. */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
/*
 * Register a new ip6tables table for a netns from an initial replace
 * blob: allocate a table_info, copy and translate the rules, then hand
 * the result to xt_register_table().  Returns the live xt_table or an
 * ERR_PTR on failure (newinfo is freed on every error path).
 */
struct xt_table *ip6t_register_table(struct net *net,
				     const struct xt_table *table,
				     const struct ip6t_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
2106 void ip6t_unregister_table(struct net
*net
, struct xt_table
*table
)
2108 struct xt_table_info
*private;
2109 void *loc_cpu_entry
;
2110 struct module
*table_owner
= table
->me
;
2111 struct ip6t_entry
*iter
;
2113 private = xt_unregister_table(table
);
2115 /* Decrease module usage counts and free resources */
2116 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
2117 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
)
2118 cleanup_entry(iter
, net
);
2119 if (private->number
> private->initial_entries
)
2120 module_put(table_owner
);
2121 xt_free_table_info(private);
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* NOTE(review): the trailing "^ invert" was dropped by the extraction
 * and restored from upstream — verify against the tree in use. */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	/* XOR flips the result when the rule carries the invert flag. */
	return (type == test_type && code >= min_code && code <= max_code)
		^ invert;
}
/*
 * xt_match handler for the built-in "icmp6" match: compares the ICMPv6
 * type and code range from the rule against the packet's ICMPv6 header.
 * Drops (hotdrop) packets whose header cannot be read.
 */
static bool
icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2162 /* Called when user tries to insert an entry of this type. */
2163 static int icmp6_checkentry(const struct xt_mtchk_param
*par
)
2165 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2167 /* Must specify no unknown invflags */
2168 return (icmpinfo
->invflags
& ~IP6T_ICMP_INV
) ? -EINVAL
: 0;
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		/* Standard verdict target: payload is a plain int verdict. */
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		/* Error target: payload is the chain-name string. */
		.name             = XT_ERROR_TARGET,
		.target           = ip6t_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV6,
	},
};
/* get/setsockopt registration for the IP6T_SO_* control range. */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
/* The one built-in match: "icmp6", restricted to IPPROTO_ICMPV6 rules. */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp6",
		.match      = icmp6_match,
		.matchsize  = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto      = IPPROTO_ICMPV6,
		.family     = NFPROTO_IPV6,
	},
};
/* Per-netns init: register IPv6 x_tables state (proc entries etc.). */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
/* Per-netns teardown: mirror of ip6_tables_net_init(). */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
/* Hook the per-netns init/exit pair into the pernet subsystem. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
/*
 * Module init: register the pernet ops, built-in targets and matches,
 * then the sockopt interface.  Each failure unwinds everything
 * registered before it (labels in reverse order).
 */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
err4:
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}
/* Module exit: unregister everything in the reverse order of init. */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
/* Exported for table modules (ip6table_filter, ip6table_nat, ...). */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);