2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/capability.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/poison.h>
21 #include <linux/icmpv6.h>
23 #include <net/compat.h>
24 #include <asm/uaccess.h>
25 #include <linux/mutex.h>
26 #include <linux/proc_fs.h>
27 #include <linux/err.h>
28 #include <linux/cpumask.h>
30 #include <linux/netfilter_ipv6/ip6_tables.h>
31 #include <linux/netfilter/x_tables.h>
32 #include <net/netfilter/nf_log.h>
33 #include "../../netfilter/xt_repldata.h"
35 MODULE_LICENSE("GPL");
36 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
37 MODULE_DESCRIPTION("IPv6 packet filter");
39 /*#define DEBUG_IP_FIREWALL*/
40 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
41 /*#define DEBUG_IP_FIREWALL_USER*/
43 #ifdef DEBUG_IP_FIREWALL
44 #define dprintf(format, args...) pr_info(format , ## args)
46 #define dprintf(format, args...)
49 #ifdef DEBUG_IP_FIREWALL_USER
50 #define duprintf(format, args...) pr_info(format , ## args)
52 #define duprintf(format, args...)
55 #ifdef CONFIG_NETFILTER_DEBUG
56 #define IP_NF_ASSERT(x) WARN_ON(!(x))
58 #define IP_NF_ASSERT(x)
62 /* All the better to debug you with... */
/* Build the default initial ruleset blob for a new ip6tables table.
 * NOTE(review): extraction is garbled here (lines fragmented, braces
 * missing from this view); code left byte-identical.  Delegates to the
 * generic xt_alloc_initial_table() helper via the ip6t/IP6T token pair
 * (macro-expanded in xt_repldata.h).  Exported for ip6table_* modules.
 */
67 void *ip6t_alloc_initial_table(const struct xt_table
*info
)
69 return xt_alloc_initial_table(ip6t
, IP6T
);
71 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table
);
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
80 Hence the start of any table is given by get_table() below. */
82 /* Returns whether matches rule or not. */
83 /* Performance critical - called for every packet */
85 ip6_packet_match(const struct sk_buff
*skb
,
88 const struct ip6t_ip6
*ip6info
,
89 unsigned int *protoff
,
90 int *fragoff
, bool *hotdrop
)
93 const struct ipv6hdr
*ipv6
= ipv6_hdr(skb
);
95 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
97 if (FWINV(ipv6_masked_addr_cmp(&ipv6
->saddr
, &ip6info
->smsk
,
98 &ip6info
->src
), IP6T_INV_SRCIP
) ||
99 FWINV(ipv6_masked_addr_cmp(&ipv6
->daddr
, &ip6info
->dmsk
,
100 &ip6info
->dst
), IP6T_INV_DSTIP
)) {
101 dprintf("Source or dest mismatch.\n");
103 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
104 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
105 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
106 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
107 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
108 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
112 ret
= ifname_compare_aligned(indev
, ip6info
->iniface
, ip6info
->iniface_mask
);
114 if (FWINV(ret
!= 0, IP6T_INV_VIA_IN
)) {
115 dprintf("VIA in mismatch (%s vs %s).%s\n",
116 indev
, ip6info
->iniface
,
117 ip6info
->invflags
&IP6T_INV_VIA_IN
?" (INV)":"");
121 ret
= ifname_compare_aligned(outdev
, ip6info
->outiface
, ip6info
->outiface_mask
);
123 if (FWINV(ret
!= 0, IP6T_INV_VIA_OUT
)) {
124 dprintf("VIA out mismatch (%s vs %s).%s\n",
125 outdev
, ip6info
->outiface
,
126 ip6info
->invflags
&IP6T_INV_VIA_OUT
?" (INV)":"");
130 /* ... might want to do something with class and flowlabel here ... */
132 /* look for the desired protocol header */
133 if((ip6info
->flags
& IP6T_F_PROTO
)) {
135 unsigned short _frag_off
;
137 protohdr
= ipv6_find_hdr(skb
, protoff
, -1, &_frag_off
, NULL
);
143 *fragoff
= _frag_off
;
145 dprintf("Packet protocol %hi ?= %s%hi.\n",
147 ip6info
->invflags
& IP6T_INV_PROTO
? "!":"",
150 if (ip6info
->proto
== protohdr
) {
151 if(ip6info
->invflags
& IP6T_INV_PROTO
) {
157 /* We need match for the '-p all', too! */
158 if ((ip6info
->proto
!= 0) &&
159 !(ip6info
->invflags
& IP6T_INV_PROTO
))
/* Validate the user-supplied ip6t_ip6 match header: reject any flag or
 * inversion-flag bits outside the known masks (IP6T_F_MASK /
 * IP6T_INV_MASK).  NOTE(review): the `return false` / `return true`
 * statements and closing braces are elided in this garbled extraction;
 * code left byte-identical.
 */
165 /* should be ip6 safe */
167 ip6_checkentry(const struct ip6t_ip6
*ipv6
)
169 if (ipv6
->flags
& ~IP6T_F_MASK
) {
170 duprintf("Unknown flag bits set: %08X\n",
171 ipv6
->flags
& ~IP6T_F_MASK
);
174 if (ipv6
->invflags
& ~IP6T_INV_MASK
) {
175 duprintf("Unknown invflag bits set: %08X\n",
176 ipv6
->invflags
& ~IP6T_INV_MASK
);
/* Target handler for the built-in ERROR target: rate-limited log of the
 * error name carried in targinfo.  Presumably returns NF_DROP as in
 * mainline — the return statement is elided in this garbled extraction;
 * code left byte-identical.
 */
183 ip6t_error(struct sk_buff
*skb
, const struct xt_action_param
*par
)
185 net_info_ratelimited("error: `%s'\n", (const char *)par
->targinfo
);
/* Return the ip6t_entry located at byte offset `offset` from the start
 * of the table blob `base`.  Pure pointer arithmetic; caller guarantees
 * the offset is valid (checked at translate time).
 */
190 static inline struct ip6t_entry
*
191 get_entry(const void *base
, unsigned int offset
)
193 return (struct ip6t_entry
*)(base
+ offset
);
/* True iff the rule matches every packet: its match area is empty
 * (target_offset == sizeof(struct ip6t_entry)) and its ip6 header part
 * is all-zero (compared against a static zeroed ip6t_ip6).  Used for
 * chain-policy / underflow detection.
 */
196 /* All zeroes == unconditional rule. */
197 /* Mildly perf critical (only if packet tracing is on) */
198 static inline bool unconditional(const struct ip6t_entry
*e
)
200 static const struct ip6t_ip6 uncond
;
202 return e
->target_offset
== sizeof(struct ip6t_entry
) &&
203 memcmp(&e
->ipv6
, &uncond
, sizeof(uncond
)) == 0;
/* Const-correct wrapper around ip6t_get_target(): returns the target
 * record that follows the matches inside entry `e`.  The cast away of
 * const is confined here so callers keep const pointers.
 */
206 static inline const struct xt_entry_target
*
207 ip6t_get_target_c(const struct ip6t_entry
*e
)
209 return ip6t_get_target((struct ip6t_entry
*)e
);
212 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
213 /* This cries for unification! */
214 static const char *const hooknames
[] = {
215 [NF_INET_PRE_ROUTING
] = "PREROUTING",
216 [NF_INET_LOCAL_IN
] = "INPUT",
217 [NF_INET_FORWARD
] = "FORWARD",
218 [NF_INET_LOCAL_OUT
] = "OUTPUT",
219 [NF_INET_POST_ROUTING
] = "POSTROUTING",
222 enum nf_ip_trace_comments
{
223 NF_IP6_TRACE_COMMENT_RULE
,
224 NF_IP6_TRACE_COMMENT_RETURN
,
225 NF_IP6_TRACE_COMMENT_POLICY
,
228 static const char *const comments
[] = {
229 [NF_IP6_TRACE_COMMENT_RULE
] = "rule",
230 [NF_IP6_TRACE_COMMENT_RETURN
] = "return",
231 [NF_IP6_TRACE_COMMENT_POLICY
] = "policy",
234 static struct nf_loginfo trace_loginfo
= {
235 .type
= NF_LOG_TYPE_LOG
,
239 .logflags
= NF_LOG_MASK
,
244 /* Mildly perf critical (only if packet tracing is on) */
246 get_chainname_rulenum(const struct ip6t_entry
*s
, const struct ip6t_entry
*e
,
247 const char *hookname
, const char **chainname
,
248 const char **comment
, unsigned int *rulenum
)
250 const struct xt_standard_target
*t
= (void *)ip6t_get_target_c(s
);
252 if (strcmp(t
->target
.u
.kernel
.target
->name
, XT_ERROR_TARGET
) == 0) {
253 /* Head of user chain: ERROR target with chainname */
254 *chainname
= t
->target
.data
;
259 if (unconditional(s
) &&
260 strcmp(t
->target
.u
.kernel
.target
->name
,
261 XT_STANDARD_TARGET
) == 0 &&
263 /* Tail of chains: STANDARD target (return/policy) */
264 *comment
= *chainname
== hookname
265 ? comments
[NF_IP6_TRACE_COMMENT_POLICY
]
266 : comments
[NF_IP6_TRACE_COMMENT_RETURN
];
275 static void trace_packet(const struct sk_buff
*skb
,
277 const struct net_device
*in
,
278 const struct net_device
*out
,
279 const char *tablename
,
280 const struct xt_table_info
*private,
281 const struct ip6t_entry
*e
)
283 const void *table_base
;
284 const struct ip6t_entry
*root
;
285 const char *hookname
, *chainname
, *comment
;
286 const struct ip6t_entry
*iter
;
287 unsigned int rulenum
= 0;
288 struct net
*net
= dev_net(in
? in
: out
);
290 table_base
= private->entries
[smp_processor_id()];
291 root
= get_entry(table_base
, private->hook_entry
[hook
]);
293 hookname
= chainname
= hooknames
[hook
];
294 comment
= comments
[NF_IP6_TRACE_COMMENT_RULE
];
296 xt_entry_foreach(iter
, root
, private->size
- private->hook_entry
[hook
])
297 if (get_chainname_rulenum(iter
, e
, hookname
,
298 &chainname
, &comment
, &rulenum
) != 0)
301 nf_log_packet(net
, AF_INET6
, hook
, skb
, in
, out
, &trace_loginfo
,
302 "TRACE: %s:%s:%s:%u ",
303 tablename
, chainname
, comment
, rulenum
);
/* Advance to the rule that physically follows `entry` in the table
 * blob, using the entry's own next_offset field.  __pure: result
 * depends only on the argument's pointed-to data.
 */
307 static inline __pure
struct ip6t_entry
*
308 ip6t_next_entry(const struct ip6t_entry
*entry
)
310 return (void *)entry
+ entry
->next_offset
;
313 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
315 ip6t_do_table(struct sk_buff
*skb
,
317 const struct net_device
*in
,
318 const struct net_device
*out
,
319 struct xt_table
*table
)
321 static const char nulldevname
[IFNAMSIZ
] __attribute__((aligned(sizeof(long))));
322 /* Initializing verdict to NF_DROP keeps gcc happy. */
323 unsigned int verdict
= NF_DROP
;
324 const char *indev
, *outdev
;
325 const void *table_base
;
326 struct ip6t_entry
*e
, **jumpstack
;
327 unsigned int *stackptr
, origptr
, cpu
;
328 const struct xt_table_info
*private;
329 struct xt_action_param acpar
;
333 indev
= in
? in
->name
: nulldevname
;
334 outdev
= out
? out
->name
: nulldevname
;
335 /* We handle fragments by dealing with the first fragment as
336 * if it was a normal packet. All other fragments are treated
337 * normally, except that they will NEVER match rules that ask
338 * things we don't know, ie. tcp syn flag or ports). If the
339 * rule is also a fragment-specific rule, non-fragments won't
341 acpar
.hotdrop
= false;
344 acpar
.family
= NFPROTO_IPV6
;
345 acpar
.hooknum
= hook
;
347 IP_NF_ASSERT(table
->valid_hooks
& (1 << hook
));
350 addend
= xt_write_recseq_begin();
351 private = table
->private;
352 cpu
= smp_processor_id();
353 table_base
= private->entries
[cpu
];
354 jumpstack
= (struct ip6t_entry
**)private->jumpstack
[cpu
];
355 stackptr
= per_cpu_ptr(private->stackptr
, cpu
);
358 e
= get_entry(table_base
, private->hook_entry
[hook
]);
361 const struct xt_entry_target
*t
;
362 const struct xt_entry_match
*ematch
;
366 if (!ip6_packet_match(skb
, indev
, outdev
, &e
->ipv6
,
367 &acpar
.thoff
, &acpar
.fragoff
, &acpar
.hotdrop
)) {
369 e
= ip6t_next_entry(e
);
373 xt_ematch_foreach(ematch
, e
) {
374 acpar
.match
= ematch
->u
.kernel
.match
;
375 acpar
.matchinfo
= ematch
->data
;
376 if (!acpar
.match
->match(skb
, &acpar
))
380 ADD_COUNTER(e
->counters
, skb
->len
, 1);
382 t
= ip6t_get_target_c(e
);
383 IP_NF_ASSERT(t
->u
.kernel
.target
);
385 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
386 /* The packet is traced: log it */
387 if (unlikely(skb
->nf_trace
))
388 trace_packet(skb
, hook
, in
, out
,
389 table
->name
, private, e
);
391 /* Standard target? */
392 if (!t
->u
.kernel
.target
->target
) {
395 v
= ((struct xt_standard_target
*)t
)->verdict
;
397 /* Pop from stack? */
398 if (v
!= XT_RETURN
) {
399 verdict
= (unsigned int)(-v
) - 1;
402 if (*stackptr
<= origptr
)
403 e
= get_entry(table_base
,
404 private->underflow
[hook
]);
406 e
= ip6t_next_entry(jumpstack
[--*stackptr
]);
409 if (table_base
+ v
!= ip6t_next_entry(e
) &&
410 !(e
->ipv6
.flags
& IP6T_F_GOTO
)) {
411 if (*stackptr
>= private->stacksize
) {
415 jumpstack
[(*stackptr
)++] = e
;
418 e
= get_entry(table_base
, v
);
422 acpar
.target
= t
->u
.kernel
.target
;
423 acpar
.targinfo
= t
->data
;
425 verdict
= t
->u
.kernel
.target
->target(skb
, &acpar
);
426 if (verdict
== XT_CONTINUE
)
427 e
= ip6t_next_entry(e
);
431 } while (!acpar
.hotdrop
);
435 xt_write_recseq_end(addend
);
438 #ifdef DEBUG_ALLOW_ALL
447 /* Figures out from what hook each rule can be called: returns 0 if
448 there are loops. Puts hook bitmask in comefrom. */
450 mark_source_chains(const struct xt_table_info
*newinfo
,
451 unsigned int valid_hooks
, void *entry0
)
455 /* No recursion; use packet counter to save back ptrs (reset
456 to 0 as we leave), and comefrom to save source hook bitmask */
457 for (hook
= 0; hook
< NF_INET_NUMHOOKS
; hook
++) {
458 unsigned int pos
= newinfo
->hook_entry
[hook
];
459 struct ip6t_entry
*e
= (struct ip6t_entry
*)(entry0
+ pos
);
461 if (!(valid_hooks
& (1 << hook
)))
464 /* Set initial back pointer. */
465 e
->counters
.pcnt
= pos
;
468 const struct xt_standard_target
*t
469 = (void *)ip6t_get_target_c(e
);
470 int visited
= e
->comefrom
& (1 << hook
);
472 if (e
->comefrom
& (1 << NF_INET_NUMHOOKS
)) {
473 pr_err("iptables: loop hook %u pos %u %08X.\n",
474 hook
, pos
, e
->comefrom
);
477 e
->comefrom
|= ((1 << hook
) | (1 << NF_INET_NUMHOOKS
));
479 /* Unconditional return/END. */
480 if ((unconditional(e
) &&
481 (strcmp(t
->target
.u
.user
.name
,
482 XT_STANDARD_TARGET
) == 0) &&
483 t
->verdict
< 0) || visited
) {
484 unsigned int oldpos
, size
;
486 if ((strcmp(t
->target
.u
.user
.name
,
487 XT_STANDARD_TARGET
) == 0) &&
488 t
->verdict
< -NF_MAX_VERDICT
- 1) {
489 duprintf("mark_source_chains: bad "
490 "negative verdict (%i)\n",
495 /* Return: backtrack through the last
498 e
->comefrom
^= (1<<NF_INET_NUMHOOKS
);
499 #ifdef DEBUG_IP_FIREWALL_USER
501 & (1 << NF_INET_NUMHOOKS
)) {
502 duprintf("Back unset "
509 pos
= e
->counters
.pcnt
;
510 e
->counters
.pcnt
= 0;
512 /* We're at the start. */
516 e
= (struct ip6t_entry
*)
518 } while (oldpos
== pos
+ e
->next_offset
);
521 size
= e
->next_offset
;
522 e
= (struct ip6t_entry
*)
523 (entry0
+ pos
+ size
);
524 if (pos
+ size
>= newinfo
->size
)
526 e
->counters
.pcnt
= pos
;
529 int newpos
= t
->verdict
;
531 if (strcmp(t
->target
.u
.user
.name
,
532 XT_STANDARD_TARGET
) == 0 &&
534 if (newpos
> newinfo
->size
-
535 sizeof(struct ip6t_entry
)) {
536 duprintf("mark_source_chains: "
537 "bad verdict (%i)\n",
541 /* This a jump; chase it. */
542 duprintf("Jump rule %u -> %u\n",
545 /* ... this is a fallthru */
546 newpos
= pos
+ e
->next_offset
;
547 if (newpos
>= newinfo
->size
)
550 e
= (struct ip6t_entry
*)
552 e
->counters
.pcnt
= pos
;
557 duprintf("Finished chain %u\n", hook
);
/* Destroy one match extension instance: build an xt_mtdtor_param for
 * the IPv6 family, invoke the extension's optional ->destroy hook, then
 * drop the module reference taken when the match was looked up.
 * NOTE(review): the par.net assignment present in mainline appears
 * elided by the garbled extraction; code left byte-identical.
 */
562 static void cleanup_match(struct xt_entry_match
*m
, struct net
*net
)
564 struct xt_mtdtor_param par
;
567 par
.match
= m
->u
.kernel
.match
;
568 par
.matchinfo
= m
->data
;
569 par
.family
= NFPROTO_IPV6
;
570 if (par
.match
->destroy
!= NULL
)
571 par
.match
->destroy(&par
);
572 module_put(par
.match
->me
);
/* Run the generic xt_check_match() validation for one match in a rule:
 * wires the kernel match ops and matchinfo into `par`, passes the
 * match payload size (total size minus header) plus the rule's proto
 * and IP6T_INV_PROTO inversion flag.  On failure logs (debug only) and
 * presumably returns ret — the return path is elided in this garbled
 * extraction; code left byte-identical.
 */
575 static int check_match(struct xt_entry_match
*m
, struct xt_mtchk_param
*par
)
577 const struct ip6t_ip6
*ipv6
= par
->entryinfo
;
580 par
->match
= m
->u
.kernel
.match
;
581 par
->matchinfo
= m
->data
;
583 ret
= xt_check_match(par
, m
->u
.match_size
- sizeof(*m
),
584 ipv6
->proto
, ipv6
->invflags
& IP6T_INV_PROTO
);
586 duprintf("ip_tables: check failed for `%s'.\n",
/* Resolve the match extension named in the user blob (auto-loading its
 * module via xt_request_find_match), stash the kernel ops in
 * m->u.kernel.match, then validate with check_match().  On validation
 * failure the module reference is dropped (module_put at the tail —
 * presumably under an error label; intermediate lines are elided in
 * this garbled extraction).  Code left byte-identical.
 */
594 find_check_match(struct xt_entry_match
*m
, struct xt_mtchk_param
*par
)
596 struct xt_match
*match
;
599 match
= xt_request_find_match(NFPROTO_IPV6
, m
->u
.user
.name
,
602 duprintf("find_check_match: `%s' not found\n", m
->u
.user
.name
);
603 return PTR_ERR(match
);
605 m
->u
.kernel
.match
= match
;
607 ret
= check_match(m
, par
);
613 module_put(m
->u
.kernel
.match
->me
);
/* Validate the rule's target with xt_check_target(): builds an
 * xt_tgchk_param (target ops, hook mask from e->comefrom, IPv6 family)
 * and passes the target payload size plus the rule's proto/inversion
 * flag.  Logs on failure (debug builds).  NOTE(review): several
 * initializer lines and the return are elided in this garbled
 * extraction; code left byte-identical.
 */
617 static int check_target(struct ip6t_entry
*e
, struct net
*net
, const char *name
)
619 struct xt_entry_target
*t
= ip6t_get_target(e
);
620 struct xt_tgchk_param par
= {
624 .target
= t
->u
.kernel
.target
,
626 .hook_mask
= e
->comefrom
,
627 .family
= NFPROTO_IPV6
,
631 t
= ip6t_get_target(e
);
632 ret
= xt_check_target(&par
, t
->u
.target_size
- sizeof(*t
),
633 e
->ipv6
.proto
, e
->ipv6
.invflags
& IP6T_INV_PROTO
);
635 duprintf("ip_tables: check failed for `%s'.\n",
636 t
->u
.kernel
.target
->name
);
643 find_check_entry(struct ip6t_entry
*e
, struct net
*net
, const char *name
,
646 struct xt_entry_target
*t
;
647 struct xt_target
*target
;
650 struct xt_mtchk_param mtpar
;
651 struct xt_entry_match
*ematch
;
656 mtpar
.entryinfo
= &e
->ipv6
;
657 mtpar
.hook_mask
= e
->comefrom
;
658 mtpar
.family
= NFPROTO_IPV6
;
659 xt_ematch_foreach(ematch
, e
) {
660 ret
= find_check_match(ematch
, &mtpar
);
662 goto cleanup_matches
;
666 t
= ip6t_get_target(e
);
667 target
= xt_request_find_target(NFPROTO_IPV6
, t
->u
.user
.name
,
669 if (IS_ERR(target
)) {
670 duprintf("find_check_entry: `%s' not found\n", t
->u
.user
.name
);
671 ret
= PTR_ERR(target
);
672 goto cleanup_matches
;
674 t
->u
.kernel
.target
= target
;
676 ret
= check_target(e
, net
, name
);
681 module_put(t
->u
.kernel
.target
->me
);
683 xt_ematch_foreach(ematch
, e
) {
686 cleanup_match(ematch
, net
);
/* An underflow (base-chain policy) entry must be an unconditional rule
 * with the STANDARD target whose verdict, decoded from the negative
 * on-wire encoding (-verdict - 1), is NF_DROP or NF_ACCEPT.  The early
 * `return false` bodies after the two guard conditions are elided in
 * this garbled extraction; code left byte-identical.
 */
691 static bool check_underflow(const struct ip6t_entry
*e
)
693 const struct xt_entry_target
*t
;
694 unsigned int verdict
;
696 if (!unconditional(e
))
698 t
= ip6t_get_target_c(e
);
699 if (strcmp(t
->u
.user
.name
, XT_STANDARD_TARGET
) != 0)
701 verdict
= ((struct xt_standard_target
*)t
)->verdict
;
702 verdict
= -verdict
- 1;
703 return verdict
== NF_DROP
|| verdict
== NF_ACCEPT
;
707 check_entry_size_and_hooks(struct ip6t_entry
*e
,
708 struct xt_table_info
*newinfo
,
709 const unsigned char *base
,
710 const unsigned char *limit
,
711 const unsigned int *hook_entries
,
712 const unsigned int *underflows
,
713 unsigned int valid_hooks
)
718 if ((unsigned long)e
% __alignof__(struct ip6t_entry
) != 0 ||
719 (unsigned char *)e
+ sizeof(struct ip6t_entry
) >= limit
||
720 (unsigned char *)e
+ e
->next_offset
> limit
) {
721 duprintf("Bad offset %p\n", e
);
726 < sizeof(struct ip6t_entry
) + sizeof(struct xt_entry_target
)) {
727 duprintf("checking: element %p size %u\n",
732 if (!ip6_checkentry(&e
->ipv6
))
735 err
= xt_check_entry_offsets(e
, e
->elems
, e
->target_offset
,
740 /* Check hooks & underflows */
741 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
742 if (!(valid_hooks
& (1 << h
)))
744 if ((unsigned char *)e
- base
== hook_entries
[h
])
745 newinfo
->hook_entry
[h
] = hook_entries
[h
];
746 if ((unsigned char *)e
- base
== underflows
[h
]) {
747 if (!check_underflow(e
)) {
748 pr_debug("Underflows must be unconditional and "
749 "use the STANDARD target with "
753 newinfo
->underflow
[h
] = underflows
[h
];
757 /* Clear counters and comefrom */
758 e
->counters
= ((struct xt_counters
) { 0, 0 });
/* Tear down one rule completely: destroy every match extension via
 * cleanup_match(), then destroy the target the same way — fill an
 * xt_tgdtor_param, call the optional ->destroy hook, and release the
 * target module reference.  Mirror image of find_check_entry().
 */
763 static void cleanup_entry(struct ip6t_entry
*e
, struct net
*net
)
765 struct xt_tgdtor_param par
;
766 struct xt_entry_target
*t
;
767 struct xt_entry_match
*ematch
;
769 /* Cleanup all matches */
770 xt_ematch_foreach(ematch
, e
)
771 cleanup_match(ematch
, net
);
772 t
= ip6t_get_target(e
);
775 par
.target
= t
->u
.kernel
.target
;
776 par
.targinfo
= t
->data
;
777 par
.family
= NFPROTO_IPV6
;
778 if (par
.target
->destroy
!= NULL
)
779 par
.target
->destroy(&par
);
780 module_put(par
.target
->me
);
783 /* Checks and translates the user-supplied table segment (held in
786 translate_table(struct net
*net
, struct xt_table_info
*newinfo
, void *entry0
,
787 const struct ip6t_replace
*repl
)
789 struct ip6t_entry
*iter
;
793 newinfo
->size
= repl
->size
;
794 newinfo
->number
= repl
->num_entries
;
796 /* Init all hooks to impossible value. */
797 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
798 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
799 newinfo
->underflow
[i
] = 0xFFFFFFFF;
802 duprintf("translate_table: size %u\n", newinfo
->size
);
804 /* Walk through entries, checking offsets. */
805 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
806 ret
= check_entry_size_and_hooks(iter
, newinfo
, entry0
,
814 if (strcmp(ip6t_get_target(iter
)->u
.user
.name
,
815 XT_ERROR_TARGET
) == 0)
816 ++newinfo
->stacksize
;
819 if (i
!= repl
->num_entries
) {
820 duprintf("translate_table: %u not %u entries\n",
821 i
, repl
->num_entries
);
825 /* Check hooks all assigned */
826 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
827 /* Only hooks which are valid */
828 if (!(repl
->valid_hooks
& (1 << i
)))
830 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
831 duprintf("Invalid hook entry %u %u\n",
832 i
, repl
->hook_entry
[i
]);
835 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
836 duprintf("Invalid underflow %u %u\n",
837 i
, repl
->underflow
[i
]);
842 if (!mark_source_chains(newinfo
, repl
->valid_hooks
, entry0
))
845 /* Finally, each sanity check must pass */
847 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
848 ret
= find_check_entry(iter
, net
, repl
->name
, repl
->size
);
855 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
858 cleanup_entry(iter
, net
);
863 /* And one copy for every other CPU */
864 for_each_possible_cpu(i
) {
865 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry0
)
866 memcpy(newinfo
->entries
[i
], entry0
, newinfo
->size
);
/* Sum per-rule packet/byte counters across all possible CPUs into the
 * caller's `counters` array.  Each read is done under the xt_recseq
 * seqcount retry loop so a concurrent writer (packet path) never yields
 * a torn bcnt/pcnt pair.  NOTE(review): the do { head of the retry
 * loop and the per-entry index increment are elided in this garbled
 * extraction; code left byte-identical.
 */
873 get_counters(const struct xt_table_info
*t
,
874 struct xt_counters counters
[])
876 struct ip6t_entry
*iter
;
880 for_each_possible_cpu(cpu
) {
881 seqcount_t
*s
= &per_cpu(xt_recseq
, cpu
);
884 xt_entry_foreach(iter
, t
->entries
[cpu
], t
->size
) {
889 start
= read_seqcount_begin(s
);
890 bcnt
= iter
->counters
.bcnt
;
891 pcnt
= iter
->counters
.pcnt
;
892 } while (read_seqcount_retry(s
, start
));
894 ADD_COUNTER(counters
[i
], bcnt
, pcnt
);
/* Allocate a zeroed snapshot array (one xt_counters per rule) and fill
 * it atomically via get_counters().  Returns ERR_PTR(-ENOMEM) on
 * allocation failure; caller vfree()s the result.  vzalloc because the
 * array can be large (number of rules * sizeof(xt_counters)).
 */
900 static struct xt_counters
*alloc_counters(const struct xt_table
*table
)
902 unsigned int countersize
;
903 struct xt_counters
*counters
;
904 const struct xt_table_info
*private = table
->private;
906 /* We need atomic snapshot of counters: rest doesn't change
907 (other than comefrom, which userspace doesn't care
909 countersize
= sizeof(struct xt_counters
) * private->number
;
910 counters
= vzalloc(countersize
);
912 if (counters
== NULL
)
913 return ERR_PTR(-ENOMEM
);
915 get_counters(private, counters
);
921 copy_entries_to_user(unsigned int total_size
,
922 const struct xt_table
*table
,
923 void __user
*userptr
)
925 unsigned int off
, num
;
926 const struct ip6t_entry
*e
;
927 struct xt_counters
*counters
;
928 const struct xt_table_info
*private = table
->private;
930 const void *loc_cpu_entry
;
932 counters
= alloc_counters(table
);
933 if (IS_ERR(counters
))
934 return PTR_ERR(counters
);
936 /* choose the copy that is on our node/cpu, ...
937 * This choice is lazy (because current thread is
938 * allowed to migrate to another cpu)
940 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
941 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
946 /* FIXME: use iterator macros --RR */
947 /* ... then go back and fix counters and names */
948 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
950 const struct xt_entry_match
*m
;
951 const struct xt_entry_target
*t
;
953 e
= (struct ip6t_entry
*)(loc_cpu_entry
+ off
);
954 if (copy_to_user(userptr
+ off
955 + offsetof(struct ip6t_entry
, counters
),
957 sizeof(counters
[num
])) != 0) {
962 for (i
= sizeof(struct ip6t_entry
);
963 i
< e
->target_offset
;
964 i
+= m
->u
.match_size
) {
967 if (copy_to_user(userptr
+ off
+ i
968 + offsetof(struct xt_entry_match
,
970 m
->u
.kernel
.match
->name
,
971 strlen(m
->u
.kernel
.match
->name
)+1)
978 t
= ip6t_get_target_c(e
);
979 if (copy_to_user(userptr
+ off
+ e
->target_offset
980 + offsetof(struct xt_entry_target
,
982 t
->u
.kernel
.target
->name
,
983 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
/* COMPAT translation of a STANDARD-target verdict from 32-bit
 * userspace: positive verdicts are jump offsets into the blob, so add
 * the accumulated compat offset delta (xt_compat_calc_jump) before
 * storing the native int via memcpy (dst may be unaligned).
 */
995 static void compat_standard_from_user(void *dst
, const void *src
)
997 int v
= *(compat_int_t
*)src
;
1000 v
+= xt_compat_calc_jump(AF_INET6
, v
);
1001 memcpy(dst
, &v
, sizeof(v
));
/* Inverse of compat_standard_from_user: convert a native verdict to
 * the 32-bit layout by subtracting the compat jump delta, then copy it
 * to userspace.  Returns -EFAULT on copy failure, 0 on success.
 */
1004 static int compat_standard_to_user(void __user
*dst
, const void *src
)
1006 compat_int_t cv
= *(int *)src
;
1009 cv
-= xt_compat_calc_jump(AF_INET6
, cv
);
1010 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
1013 static int compat_calc_entry(const struct ip6t_entry
*e
,
1014 const struct xt_table_info
*info
,
1015 const void *base
, struct xt_table_info
*newinfo
)
1017 const struct xt_entry_match
*ematch
;
1018 const struct xt_entry_target
*t
;
1019 unsigned int entry_offset
;
1022 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1023 entry_offset
= (void *)e
- base
;
1024 xt_ematch_foreach(ematch
, e
)
1025 off
+= xt_compat_match_offset(ematch
->u
.kernel
.match
);
1026 t
= ip6t_get_target_c(e
);
1027 off
+= xt_compat_target_offset(t
->u
.kernel
.target
);
1028 newinfo
->size
-= off
;
1029 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1033 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1034 if (info
->hook_entry
[i
] &&
1035 (e
< (struct ip6t_entry
*)(base
+ info
->hook_entry
[i
])))
1036 newinfo
->hook_entry
[i
] -= off
;
1037 if (info
->underflow
[i
] &&
1038 (e
< (struct ip6t_entry
*)(base
+ info
->underflow
[i
])))
1039 newinfo
->underflow
[i
] -= off
;
/* Compute the xt_table_info a 32-bit userspace would see for `info`:
 * copy the header part (everything before the entries[] pointers),
 * then walk this CPU's rule copy letting compat_calc_entry() shrink
 * sizes/offsets by the native-vs-compat struct size delta.
 * NOTE(review): the loop tail and final return are elided in this
 * garbled extraction; code left byte-identical.
 */
1044 static int compat_table_info(const struct xt_table_info
*info
,
1045 struct xt_table_info
*newinfo
)
1047 struct ip6t_entry
*iter
;
1048 void *loc_cpu_entry
;
1051 if (!newinfo
|| !info
)
1054 /* we dont care about newinfo->entries[] */
1055 memcpy(newinfo
, info
, offsetof(struct xt_table_info
, entries
));
1056 newinfo
->initial_entries
= 0;
1057 loc_cpu_entry
= info
->entries
[raw_smp_processor_id()];
1058 xt_compat_init_offsets(AF_INET6
, info
->number
);
1059 xt_entry_foreach(iter
, loc_cpu_entry
, info
->size
) {
1060 ret
= compat_calc_entry(iter
, info
, loc_cpu_entry
, newinfo
);
1068 static int get_info(struct net
*net
, void __user
*user
,
1069 const int *len
, int compat
)
1071 char name
[XT_TABLE_MAXNAMELEN
];
1075 if (*len
!= sizeof(struct ip6t_getinfo
)) {
1076 duprintf("length %u != %zu\n", *len
,
1077 sizeof(struct ip6t_getinfo
));
1081 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
1084 name
[XT_TABLE_MAXNAMELEN
-1] = '\0';
1085 #ifdef CONFIG_COMPAT
1087 xt_compat_lock(AF_INET6
);
1089 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1090 "ip6table_%s", name
);
1091 if (!IS_ERR_OR_NULL(t
)) {
1092 struct ip6t_getinfo info
;
1093 const struct xt_table_info
*private = t
->private;
1094 #ifdef CONFIG_COMPAT
1095 struct xt_table_info tmp
;
1098 ret
= compat_table_info(private, &tmp
);
1099 xt_compat_flush_offsets(AF_INET6
);
1103 memset(&info
, 0, sizeof(info
));
1104 info
.valid_hooks
= t
->valid_hooks
;
1105 memcpy(info
.hook_entry
, private->hook_entry
,
1106 sizeof(info
.hook_entry
));
1107 memcpy(info
.underflow
, private->underflow
,
1108 sizeof(info
.underflow
));
1109 info
.num_entries
= private->number
;
1110 info
.size
= private->size
;
1111 strcpy(info
.name
, name
);
1113 if (copy_to_user(user
, &info
, *len
) != 0)
1121 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1122 #ifdef CONFIG_COMPAT
1124 xt_compat_unlock(AF_INET6
);
1130 get_entries(struct net
*net
, struct ip6t_get_entries __user
*uptr
,
1134 struct ip6t_get_entries get
;
1137 if (*len
< sizeof(get
)) {
1138 duprintf("get_entries: %u < %zu\n", *len
, sizeof(get
));
1141 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1143 if (*len
!= sizeof(struct ip6t_get_entries
) + get
.size
) {
1144 duprintf("get_entries: %u != %zu\n",
1145 *len
, sizeof(get
) + get
.size
);
1149 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1150 if (!IS_ERR_OR_NULL(t
)) {
1151 struct xt_table_info
*private = t
->private;
1152 duprintf("t->private->number = %u\n", private->number
);
1153 if (get
.size
== private->size
)
1154 ret
= copy_entries_to_user(private->size
,
1155 t
, uptr
->entrytable
);
1157 duprintf("get_entries: I've got %u not %u!\n",
1158 private->size
, get
.size
);
1164 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1170 __do_replace(struct net
*net
, const char *name
, unsigned int valid_hooks
,
1171 struct xt_table_info
*newinfo
, unsigned int num_counters
,
1172 void __user
*counters_ptr
)
1176 struct xt_table_info
*oldinfo
;
1177 struct xt_counters
*counters
;
1178 const void *loc_cpu_old_entry
;
1179 struct ip6t_entry
*iter
;
1182 counters
= vzalloc(num_counters
* sizeof(struct xt_counters
));
1188 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1189 "ip6table_%s", name
);
1190 if (IS_ERR_OR_NULL(t
)) {
1191 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1192 goto free_newinfo_counters_untrans
;
1196 if (valid_hooks
!= t
->valid_hooks
) {
1197 duprintf("Valid hook crap: %08X vs %08X\n",
1198 valid_hooks
, t
->valid_hooks
);
1203 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
1207 /* Update module usage count based on number of rules */
1208 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1209 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
1210 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
1211 (newinfo
->number
<= oldinfo
->initial_entries
))
1213 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
1214 (newinfo
->number
<= oldinfo
->initial_entries
))
1217 /* Get the old counters, and synchronize with replace */
1218 get_counters(oldinfo
, counters
);
1220 /* Decrease module usage counts and free resource */
1221 loc_cpu_old_entry
= oldinfo
->entries
[raw_smp_processor_id()];
1222 xt_entry_foreach(iter
, loc_cpu_old_entry
, oldinfo
->size
)
1223 cleanup_entry(iter
, net
);
1225 xt_free_table_info(oldinfo
);
1226 if (copy_to_user(counters_ptr
, counters
,
1227 sizeof(struct xt_counters
) * num_counters
) != 0) {
1228 /* Silent error, can't fail, new table is already in place */
1229 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1238 free_newinfo_counters_untrans
:
1245 do_replace(struct net
*net
, const void __user
*user
, unsigned int len
)
1248 struct ip6t_replace tmp
;
1249 struct xt_table_info
*newinfo
;
1250 void *loc_cpu_entry
;
1251 struct ip6t_entry
*iter
;
1253 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1256 /* overflow check */
1257 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1259 if (tmp
.num_counters
== 0)
1262 tmp
.name
[sizeof(tmp
.name
)-1] = 0;
1264 newinfo
= xt_alloc_table_info(tmp
.size
);
1268 /* choose the copy that is on our node/cpu */
1269 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1270 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1276 ret
= translate_table(net
, newinfo
, loc_cpu_entry
, &tmp
);
1280 duprintf("ip_tables: Translated table\n");
1282 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1283 tmp
.num_counters
, tmp
.counters
);
1285 goto free_newinfo_untrans
;
1288 free_newinfo_untrans
:
1289 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1290 cleanup_entry(iter
, net
);
1292 xt_free_table_info(newinfo
);
1297 do_add_counters(struct net
*net
, const void __user
*user
, unsigned int len
,
1300 unsigned int i
, curcpu
;
1301 struct xt_counters_info tmp
;
1302 struct xt_counters
*paddc
;
1304 const struct xt_table_info
*private;
1306 const void *loc_cpu_entry
;
1307 struct ip6t_entry
*iter
;
1308 unsigned int addend
;
1310 paddc
= xt_copy_counters_from_user(user
, len
, &tmp
, compat
);
1312 return PTR_ERR(paddc
);
1313 t
= xt_find_table_lock(net
, AF_INET6
, tmp
.name
);
1314 if (IS_ERR_OR_NULL(t
)) {
1315 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1321 private = t
->private;
1322 if (private->number
!= tmp
.num_counters
) {
1324 goto unlock_up_free
;
1328 /* Choose the copy that is on our node */
1329 curcpu
= smp_processor_id();
1330 addend
= xt_write_recseq_begin();
1331 loc_cpu_entry
= private->entries
[curcpu
];
1332 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
) {
1333 ADD_COUNTER(iter
->counters
, paddc
[i
].bcnt
, paddc
[i
].pcnt
);
1336 xt_write_recseq_end(addend
);
1348 #ifdef CONFIG_COMPAT
/* 32-bit-userspace layout of struct ip6t_replace (the SO_SET_REPLACE
 * payload): same field meaning as the native struct but with
 * compat-sized pointer (compat_uptr_t) and compat entries.  Several
 * middle fields (valid_hooks, num_entries, size, num_counters) are
 * elided in this garbled extraction; code left byte-identical.
 */
1349 struct compat_ip6t_replace
{
1350 char name
[XT_TABLE_MAXNAMELEN
];
1354 u32 hook_entry
[NF_INET_NUMHOOKS
];
1355 u32 underflow
[NF_INET_NUMHOOKS
];
1357 compat_uptr_t counters
; /* struct xt_counters * */
1358 struct compat_ip6t_entry entries
[0];
1362 compat_copy_entry_to_user(struct ip6t_entry
*e
, void __user
**dstptr
,
1363 unsigned int *size
, struct xt_counters
*counters
,
1366 struct xt_entry_target
*t
;
1367 struct compat_ip6t_entry __user
*ce
;
1368 u_int16_t target_offset
, next_offset
;
1369 compat_uint_t origsize
;
1370 const struct xt_entry_match
*ematch
;
1374 ce
= (struct compat_ip6t_entry __user
*)*dstptr
;
1375 if (copy_to_user(ce
, e
, sizeof(struct ip6t_entry
)) != 0 ||
1376 copy_to_user(&ce
->counters
, &counters
[i
],
1377 sizeof(counters
[i
])) != 0)
1380 *dstptr
+= sizeof(struct compat_ip6t_entry
);
1381 *size
-= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1383 xt_ematch_foreach(ematch
, e
) {
1384 ret
= xt_compat_match_to_user(ematch
, dstptr
, size
);
1388 target_offset
= e
->target_offset
- (origsize
- *size
);
1389 t
= ip6t_get_target(e
);
1390 ret
= xt_compat_target_to_user(t
, dstptr
, size
);
1393 next_offset
= e
->next_offset
- (origsize
- *size
);
1394 if (put_user(target_offset
, &ce
->target_offset
) != 0 ||
1395 put_user(next_offset
, &ce
->next_offset
) != 0)
1401 compat_find_calc_match(struct xt_entry_match
*m
,
1402 const struct ip6t_ip6
*ipv6
,
1403 unsigned int hookmask
,
1406 struct xt_match
*match
;
1408 match
= xt_request_find_match(NFPROTO_IPV6
, m
->u
.user
.name
,
1409 m
->u
.user
.revision
);
1410 if (IS_ERR(match
)) {
1411 duprintf("compat_check_calc_match: `%s' not found\n",
1413 return PTR_ERR(match
);
1415 m
->u
.kernel
.match
= match
;
1416 *size
+= xt_compat_match_offset(match
);
1420 static void compat_release_entry(struct compat_ip6t_entry
*e
)
1422 struct xt_entry_target
*t
;
1423 struct xt_entry_match
*ematch
;
1425 /* Cleanup all matches */
1426 xt_ematch_foreach(ematch
, e
)
1427 module_put(ematch
->u
.kernel
.match
->me
);
1428 t
= compat_ip6t_get_target(e
);
1429 module_put(t
->u
.kernel
.target
->me
);
1433 check_compat_entry_size_and_hooks(struct compat_ip6t_entry
*e
,
1434 struct xt_table_info
*newinfo
,
1436 const unsigned char *base
,
1437 const unsigned char *limit
)
1439 struct xt_entry_match
*ematch
;
1440 struct xt_entry_target
*t
;
1441 struct xt_target
*target
;
1442 unsigned int entry_offset
;
1446 duprintf("check_compat_entry_size_and_hooks %p\n", e
);
1447 if ((unsigned long)e
% __alignof__(struct compat_ip6t_entry
) != 0 ||
1448 (unsigned char *)e
+ sizeof(struct compat_ip6t_entry
) >= limit
||
1449 (unsigned char *)e
+ e
->next_offset
> limit
) {
1450 duprintf("Bad offset %p, limit = %p\n", e
, limit
);
1454 if (e
->next_offset
< sizeof(struct compat_ip6t_entry
) +
1455 sizeof(struct compat_xt_entry_target
)) {
1456 duprintf("checking: element %p size %u\n",
1461 if (!ip6_checkentry(&e
->ipv6
))
1464 ret
= xt_compat_check_entry_offsets(e
, e
->elems
,
1465 e
->target_offset
, e
->next_offset
);
1469 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1470 entry_offset
= (void *)e
- (void *)base
;
1472 xt_ematch_foreach(ematch
, e
) {
1473 ret
= compat_find_calc_match(ematch
, &e
->ipv6
, e
->comefrom
,
1476 goto release_matches
;
1480 t
= compat_ip6t_get_target(e
);
1481 target
= xt_request_find_target(NFPROTO_IPV6
, t
->u
.user
.name
,
1482 t
->u
.user
.revision
);
1483 if (IS_ERR(target
)) {
1484 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1486 ret
= PTR_ERR(target
);
1487 goto release_matches
;
1489 t
->u
.kernel
.target
= target
;
1491 off
+= xt_compat_target_offset(target
);
1493 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1500 module_put(t
->u
.kernel
.target
->me
);
1502 xt_ematch_foreach(ematch
, e
) {
1505 module_put(ematch
->u
.kernel
.match
->me
);
1511 compat_copy_entry_from_user(struct compat_ip6t_entry
*e
, void **dstptr
,
1513 struct xt_table_info
*newinfo
, unsigned char *base
)
1515 struct xt_entry_target
*t
;
1516 struct ip6t_entry
*de
;
1517 unsigned int origsize
;
1519 struct xt_entry_match
*ematch
;
1522 de
= (struct ip6t_entry
*)*dstptr
;
1523 memcpy(de
, e
, sizeof(struct ip6t_entry
));
1524 memcpy(&de
->counters
, &e
->counters
, sizeof(e
->counters
));
1526 *dstptr
+= sizeof(struct ip6t_entry
);
1527 *size
+= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1529 xt_ematch_foreach(ematch
, e
)
1530 xt_compat_match_from_user(ematch
, dstptr
, size
);
1532 de
->target_offset
= e
->target_offset
- (origsize
- *size
);
1533 t
= compat_ip6t_get_target(e
);
1534 xt_compat_target_from_user(t
, dstptr
, size
);
1536 de
->next_offset
= e
->next_offset
- (origsize
- *size
);
1537 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1538 if ((unsigned char *)de
- base
< newinfo
->hook_entry
[h
])
1539 newinfo
->hook_entry
[h
] -= origsize
- *size
;
1540 if ((unsigned char *)de
- base
< newinfo
->underflow
[h
])
1541 newinfo
->underflow
[h
] -= origsize
- *size
;
1546 translate_compat_table(struct net
*net
,
1547 struct xt_table_info
**pinfo
,
1549 const struct compat_ip6t_replace
*compatr
)
1552 struct xt_table_info
*newinfo
, *info
;
1553 void *pos
, *entry0
, *entry1
;
1554 struct compat_ip6t_entry
*iter0
;
1555 struct ip6t_replace repl
;
1561 size
= compatr
->size
;
1562 info
->number
= compatr
->num_entries
;
1564 duprintf("translate_compat_table: size %u\n", info
->size
);
1566 xt_compat_lock(AF_INET6
);
1567 xt_compat_init_offsets(AF_INET6
, compatr
->num_entries
);
1568 /* Walk through entries, checking offsets. */
1569 xt_entry_foreach(iter0
, entry0
, compatr
->size
) {
1570 ret
= check_compat_entry_size_and_hooks(iter0
, info
, &size
,
1572 entry0
+ compatr
->size
);
1579 if (j
!= compatr
->num_entries
) {
1580 duprintf("translate_compat_table: %u not %u entries\n",
1581 j
, compatr
->num_entries
);
1586 newinfo
= xt_alloc_table_info(size
);
1590 newinfo
->number
= compatr
->num_entries
;
1591 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1592 newinfo
->hook_entry
[i
] = compatr
->hook_entry
[i
];
1593 newinfo
->underflow
[i
] = compatr
->underflow
[i
];
1595 entry1
= newinfo
->entries
[raw_smp_processor_id()];
1597 size
= compatr
->size
;
1598 xt_entry_foreach(iter0
, entry0
, compatr
->size
)
1599 compat_copy_entry_from_user(iter0
, &pos
, &size
,
1602 /* all module references in entry0 are now gone. */
1603 xt_compat_flush_offsets(AF_INET6
);
1604 xt_compat_unlock(AF_INET6
);
1606 memcpy(&repl
, compatr
, sizeof(*compatr
));
1608 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1609 repl
.hook_entry
[i
] = newinfo
->hook_entry
[i
];
1610 repl
.underflow
[i
] = newinfo
->underflow
[i
];
1613 repl
.num_counters
= 0;
1614 repl
.counters
= NULL
;
1615 repl
.size
= newinfo
->size
;
1616 ret
= translate_table(net
, newinfo
, entry1
, &repl
);
1622 xt_free_table_info(info
);
1626 xt_free_table_info(newinfo
);
1629 xt_compat_flush_offsets(AF_INET6
);
1630 xt_compat_unlock(AF_INET6
);
1631 xt_entry_foreach(iter0
, entry0
, compatr
->size
) {
1634 compat_release_entry(iter0
);
1640 compat_do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1643 struct compat_ip6t_replace tmp
;
1644 struct xt_table_info
*newinfo
;
1645 void *loc_cpu_entry
;
1646 struct ip6t_entry
*iter
;
1648 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1651 /* overflow check */
1652 if (tmp
.size
>= INT_MAX
/ num_possible_cpus())
1654 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1656 if (tmp
.num_counters
== 0)
1659 tmp
.name
[sizeof(tmp
.name
)-1] = 0;
1661 newinfo
= xt_alloc_table_info(tmp
.size
);
1665 /* choose the copy that is on our node/cpu */
1666 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1667 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1673 ret
= translate_compat_table(net
, &newinfo
, &loc_cpu_entry
, &tmp
);
1677 duprintf("compat_do_replace: Translated table\n");
1679 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1680 tmp
.num_counters
, compat_ptr(tmp
.counters
));
1682 goto free_newinfo_untrans
;
1685 free_newinfo_untrans
:
1686 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1687 cleanup_entry(iter
, net
);
1689 xt_free_table_info(newinfo
);
1694 compat_do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1699 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
1703 case IP6T_SO_SET_REPLACE
:
1704 ret
= compat_do_replace(sock_net(sk
), user
, len
);
1707 case IP6T_SO_SET_ADD_COUNTERS
:
1708 ret
= do_add_counters(sock_net(sk
), user
, len
, 1);
1712 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
1719 struct compat_ip6t_get_entries
{
1720 char name
[XT_TABLE_MAXNAMELEN
];
1722 struct compat_ip6t_entry entrytable
[0];
1726 compat_copy_entries_to_user(unsigned int total_size
, struct xt_table
*table
,
1727 void __user
*userptr
)
1729 struct xt_counters
*counters
;
1730 const struct xt_table_info
*private = table
->private;
1734 const void *loc_cpu_entry
;
1736 struct ip6t_entry
*iter
;
1738 counters
= alloc_counters(table
);
1739 if (IS_ERR(counters
))
1740 return PTR_ERR(counters
);
1742 /* choose the copy that is on our node/cpu, ...
1743 * This choice is lazy (because current thread is
1744 * allowed to migrate to another cpu)
1746 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1749 xt_entry_foreach(iter
, loc_cpu_entry
, total_size
) {
1750 ret
= compat_copy_entry_to_user(iter
, &pos
,
1751 &size
, counters
, i
++);
1761 compat_get_entries(struct net
*net
, struct compat_ip6t_get_entries __user
*uptr
,
1765 struct compat_ip6t_get_entries get
;
1768 if (*len
< sizeof(get
)) {
1769 duprintf("compat_get_entries: %u < %zu\n", *len
, sizeof(get
));
1773 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1776 if (*len
!= sizeof(struct compat_ip6t_get_entries
) + get
.size
) {
1777 duprintf("compat_get_entries: %u != %zu\n",
1778 *len
, sizeof(get
) + get
.size
);
1782 xt_compat_lock(AF_INET6
);
1783 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1784 if (!IS_ERR_OR_NULL(t
)) {
1785 const struct xt_table_info
*private = t
->private;
1786 struct xt_table_info info
;
1787 duprintf("t->private->number = %u\n", private->number
);
1788 ret
= compat_table_info(private, &info
);
1789 if (!ret
&& get
.size
== info
.size
) {
1790 ret
= compat_copy_entries_to_user(private->size
,
1791 t
, uptr
->entrytable
);
1793 duprintf("compat_get_entries: I've got %u not %u!\n",
1794 private->size
, get
.size
);
1797 xt_compat_flush_offsets(AF_INET6
);
1801 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1803 xt_compat_unlock(AF_INET6
);
1807 static int do_ip6t_get_ctl(struct sock
*, int, void __user
*, int *);
1810 compat_do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1814 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
1818 case IP6T_SO_GET_INFO
:
1819 ret
= get_info(sock_net(sk
), user
, len
, 1);
1821 case IP6T_SO_GET_ENTRIES
:
1822 ret
= compat_get_entries(sock_net(sk
), user
, len
);
1825 ret
= do_ip6t_get_ctl(sk
, cmd
, user
, len
);
1832 do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
, unsigned int len
)
1836 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
1840 case IP6T_SO_SET_REPLACE
:
1841 ret
= do_replace(sock_net(sk
), user
, len
);
1844 case IP6T_SO_SET_ADD_COUNTERS
:
1845 ret
= do_add_counters(sock_net(sk
), user
, len
, 0);
1849 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
1857 do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1861 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
1865 case IP6T_SO_GET_INFO
:
1866 ret
= get_info(sock_net(sk
), user
, len
, 0);
1869 case IP6T_SO_GET_ENTRIES
:
1870 ret
= get_entries(sock_net(sk
), user
, len
);
1873 case IP6T_SO_GET_REVISION_MATCH
:
1874 case IP6T_SO_GET_REVISION_TARGET
: {
1875 struct xt_get_revision rev
;
1878 if (*len
!= sizeof(rev
)) {
1882 if (copy_from_user(&rev
, user
, sizeof(rev
)) != 0) {
1886 rev
.name
[sizeof(rev
.name
)-1] = 0;
1888 if (cmd
== IP6T_SO_GET_REVISION_TARGET
)
1893 try_then_request_module(xt_find_revision(AF_INET6
, rev
.name
,
1896 "ip6t_%s", rev
.name
);
1901 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd
);
1908 struct xt_table
*ip6t_register_table(struct net
*net
,
1909 const struct xt_table
*table
,
1910 const struct ip6t_replace
*repl
)
1913 struct xt_table_info
*newinfo
;
1914 struct xt_table_info bootstrap
= {0};
1915 void *loc_cpu_entry
;
1916 struct xt_table
*new_table
;
1918 newinfo
= xt_alloc_table_info(repl
->size
);
1924 /* choose the copy on our node/cpu, but dont care about preemption */
1925 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1926 memcpy(loc_cpu_entry
, repl
->entries
, repl
->size
);
1928 ret
= translate_table(net
, newinfo
, loc_cpu_entry
, repl
);
1932 new_table
= xt_register_table(net
, table
, &bootstrap
, newinfo
);
1933 if (IS_ERR(new_table
)) {
1934 ret
= PTR_ERR(new_table
);
1940 xt_free_table_info(newinfo
);
1942 return ERR_PTR(ret
);
1945 void ip6t_unregister_table(struct net
*net
, struct xt_table
*table
)
1947 struct xt_table_info
*private;
1948 void *loc_cpu_entry
;
1949 struct module
*table_owner
= table
->me
;
1950 struct ip6t_entry
*iter
;
1952 private = xt_unregister_table(table
);
1954 /* Decrease module usage counts and free resources */
1955 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1956 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
)
1957 cleanup_entry(iter
, net
);
1958 if (private->number
> private->initial_entries
)
1959 module_put(table_owner
);
1960 xt_free_table_info(private);
1963 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
1965 icmp6_type_code_match(u_int8_t test_type
, u_int8_t min_code
, u_int8_t max_code
,
1966 u_int8_t type
, u_int8_t code
,
1969 return (type
== test_type
&& code
>= min_code
&& code
<= max_code
)
1974 icmp6_match(const struct sk_buff
*skb
, struct xt_action_param
*par
)
1976 const struct icmp6hdr
*ic
;
1977 struct icmp6hdr _icmph
;
1978 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
1980 /* Must not be a fragment. */
1981 if (par
->fragoff
!= 0)
1984 ic
= skb_header_pointer(skb
, par
->thoff
, sizeof(_icmph
), &_icmph
);
1986 /* We've been asked to examine this packet, and we
1987 * can't. Hence, no choice but to drop.
1989 duprintf("Dropping evil ICMP tinygram.\n");
1990 par
->hotdrop
= true;
1994 return icmp6_type_code_match(icmpinfo
->type
,
1997 ic
->icmp6_type
, ic
->icmp6_code
,
1998 !!(icmpinfo
->invflags
&IP6T_ICMP_INV
));
2001 /* Called when user tries to insert an entry of this type. */
2002 static int icmp6_checkentry(const struct xt_mtchk_param
*par
)
2004 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2006 /* Must specify no unknown invflags */
2007 return (icmpinfo
->invflags
& ~IP6T_ICMP_INV
) ? -EINVAL
: 0;
2010 /* The built-in targets: standard (NULL) and error. */
2011 static struct xt_target ip6t_builtin_tg
[] __read_mostly
= {
2013 .name
= XT_STANDARD_TARGET
,
2014 .targetsize
= sizeof(int),
2015 .family
= NFPROTO_IPV6
,
2016 #ifdef CONFIG_COMPAT
2017 .compatsize
= sizeof(compat_int_t
),
2018 .compat_from_user
= compat_standard_from_user
,
2019 .compat_to_user
= compat_standard_to_user
,
2023 .name
= XT_ERROR_TARGET
,
2024 .target
= ip6t_error
,
2025 .targetsize
= XT_FUNCTION_MAXNAMELEN
,
2026 .family
= NFPROTO_IPV6
,
2030 static struct nf_sockopt_ops ip6t_sockopts
= {
2032 .set_optmin
= IP6T_BASE_CTL
,
2033 .set_optmax
= IP6T_SO_SET_MAX
+1,
2034 .set
= do_ip6t_set_ctl
,
2035 #ifdef CONFIG_COMPAT
2036 .compat_set
= compat_do_ip6t_set_ctl
,
2038 .get_optmin
= IP6T_BASE_CTL
,
2039 .get_optmax
= IP6T_SO_GET_MAX
+1,
2040 .get
= do_ip6t_get_ctl
,
2041 #ifdef CONFIG_COMPAT
2042 .compat_get
= compat_do_ip6t_get_ctl
,
2044 .owner
= THIS_MODULE
,
2047 static struct xt_match ip6t_builtin_mt
[] __read_mostly
= {
2050 .match
= icmp6_match
,
2051 .matchsize
= sizeof(struct ip6t_icmp
),
2052 .checkentry
= icmp6_checkentry
,
2053 .proto
= IPPROTO_ICMPV6
,
2054 .family
= NFPROTO_IPV6
,
2058 static int __net_init
ip6_tables_net_init(struct net
*net
)
2060 return xt_proto_init(net
, NFPROTO_IPV6
);
2063 static void __net_exit
ip6_tables_net_exit(struct net
*net
)
2065 xt_proto_fini(net
, NFPROTO_IPV6
);
2068 static struct pernet_operations ip6_tables_net_ops
= {
2069 .init
= ip6_tables_net_init
,
2070 .exit
= ip6_tables_net_exit
,
2073 static int __init
ip6_tables_init(void)
2077 ret
= register_pernet_subsys(&ip6_tables_net_ops
);
2081 /* No one else will be downing sem now, so we won't sleep */
2082 ret
= xt_register_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
2085 ret
= xt_register_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
2089 /* Register setsockopt */
2090 ret
= nf_register_sockopt(&ip6t_sockopts
);
2094 pr_info("(C) 2000-2006 Netfilter Core Team\n");
2098 xt_unregister_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
2100 xt_unregister_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
2102 unregister_pernet_subsys(&ip6_tables_net_ops
);
2107 static void __exit
ip6_tables_fini(void)
2109 nf_unregister_sockopt(&ip6t_sockopts
);
2111 xt_unregister_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
2112 xt_unregister_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
2113 unregister_pernet_subsys(&ip6_tables_net_ops
);
/* Public API for table modules (ip6table_filter etc.). */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);
);