/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/capability.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/poison.h>
21 #include <linux/icmpv6.h>
23 #include <net/compat.h>
24 #include <asm/uaccess.h>
25 #include <linux/mutex.h>
26 #include <linux/proc_fs.h>
27 #include <linux/err.h>
28 #include <linux/cpumask.h>
30 #include <linux/netfilter_ipv6/ip6_tables.h>
31 #include <linux/netfilter/x_tables.h>
32 #include <net/netfilter/nf_log.h>
33 #include "../../netfilter/xt_repldata.h"
35 MODULE_LICENSE("GPL");
36 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
37 MODULE_DESCRIPTION("IPv6 packet filter");
39 /*#define DEBUG_IP_FIREWALL*/
40 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
41 /*#define DEBUG_IP_FIREWALL_USER*/
/* Debug print/assert helpers; each compiles to nothing unless the
 * corresponding debug knob above (or CONFIG_NETFILTER_DEBUG) is enabled. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif
62 /* All the better to debug you with... */
67 void *ip6t_alloc_initial_table(const struct xt_table
*info
)
69 return xt_alloc_initial_table(ip6t
, IP6T
);
71 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table
);
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
82 /* Returns whether matches rule or not. */
83 /* Performance critical - called for every packet */
85 ip6_packet_match(const struct sk_buff
*skb
,
88 const struct ip6t_ip6
*ip6info
,
89 unsigned int *protoff
,
90 int *fragoff
, bool *hotdrop
)
93 const struct ipv6hdr
*ipv6
= ipv6_hdr(skb
);
95 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
97 if (FWINV(ipv6_masked_addr_cmp(&ipv6
->saddr
, &ip6info
->smsk
,
98 &ip6info
->src
), IP6T_INV_SRCIP
) ||
99 FWINV(ipv6_masked_addr_cmp(&ipv6
->daddr
, &ip6info
->dmsk
,
100 &ip6info
->dst
), IP6T_INV_DSTIP
)) {
101 dprintf("Source or dest mismatch.\n");
103 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
104 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
105 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
106 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
107 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
108 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
112 ret
= ifname_compare_aligned(indev
, ip6info
->iniface
, ip6info
->iniface_mask
);
114 if (FWINV(ret
!= 0, IP6T_INV_VIA_IN
)) {
115 dprintf("VIA in mismatch (%s vs %s).%s\n",
116 indev
, ip6info
->iniface
,
117 ip6info
->invflags
&IP6T_INV_VIA_IN
?" (INV)":"");
121 ret
= ifname_compare_aligned(outdev
, ip6info
->outiface
, ip6info
->outiface_mask
);
123 if (FWINV(ret
!= 0, IP6T_INV_VIA_OUT
)) {
124 dprintf("VIA out mismatch (%s vs %s).%s\n",
125 outdev
, ip6info
->outiface
,
126 ip6info
->invflags
&IP6T_INV_VIA_OUT
?" (INV)":"");
130 /* ... might want to do something with class and flowlabel here ... */
132 /* look for the desired protocol header */
133 if((ip6info
->flags
& IP6T_F_PROTO
)) {
135 unsigned short _frag_off
;
137 protohdr
= ipv6_find_hdr(skb
, protoff
, -1, &_frag_off
, NULL
);
143 *fragoff
= _frag_off
;
145 dprintf("Packet protocol %hi ?= %s%hi.\n",
147 ip6info
->invflags
& IP6T_INV_PROTO
? "!":"",
150 if (ip6info
->proto
== protohdr
) {
151 if(ip6info
->invflags
& IP6T_INV_PROTO
) {
157 /* We need match for the '-p all', too! */
158 if ((ip6info
->proto
!= 0) &&
159 !(ip6info
->invflags
& IP6T_INV_PROTO
))
165 /* should be ip6 safe */
167 ip6_checkentry(const struct ip6t_ip6
*ipv6
)
169 if (ipv6
->flags
& ~IP6T_F_MASK
) {
170 duprintf("Unknown flag bits set: %08X\n",
171 ipv6
->flags
& ~IP6T_F_MASK
);
174 if (ipv6
->invflags
& ~IP6T_INV_MASK
) {
175 duprintf("Unknown invflag bits set: %08X\n",
176 ipv6
->invflags
& ~IP6T_INV_MASK
);
183 ip6t_error(struct sk_buff
*skb
, const struct xt_action_param
*par
)
185 net_info_ratelimited("error: `%s'\n", (const char *)par
->targinfo
);
/* Return the rule at byte @offset from the start of the table blob. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
196 /* All zeroes == unconditional rule. */
197 /* Mildly perf critical (only if packet tracing is on) */
198 static inline bool unconditional(const struct ip6t_entry
*e
)
200 static const struct ip6t_ip6 uncond
;
202 return e
->target_offset
== sizeof(struct ip6t_entry
) &&
203 memcmp(&e
->ipv6
, &uncond
, sizeof(uncond
)) == 0;
/* const-qualified wrapper around ip6t_get_target(). */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
213 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
214 /* This cries for unification! */
215 static const char *const hooknames
[] = {
216 [NF_INET_PRE_ROUTING
] = "PREROUTING",
217 [NF_INET_LOCAL_IN
] = "INPUT",
218 [NF_INET_FORWARD
] = "FORWARD",
219 [NF_INET_LOCAL_OUT
] = "OUTPUT",
220 [NF_INET_POST_ROUTING
] = "POSTROUTING",
223 enum nf_ip_trace_comments
{
224 NF_IP6_TRACE_COMMENT_RULE
,
225 NF_IP6_TRACE_COMMENT_RETURN
,
226 NF_IP6_TRACE_COMMENT_POLICY
,
229 static const char *const comments
[] = {
230 [NF_IP6_TRACE_COMMENT_RULE
] = "rule",
231 [NF_IP6_TRACE_COMMENT_RETURN
] = "return",
232 [NF_IP6_TRACE_COMMENT_POLICY
] = "policy",
235 static struct nf_loginfo trace_loginfo
= {
236 .type
= NF_LOG_TYPE_LOG
,
240 .logflags
= NF_LOG_MASK
,
245 /* Mildly perf critical (only if packet tracing is on) */
247 get_chainname_rulenum(const struct ip6t_entry
*s
, const struct ip6t_entry
*e
,
248 const char *hookname
, const char **chainname
,
249 const char **comment
, unsigned int *rulenum
)
251 const struct xt_standard_target
*t
= (void *)ip6t_get_target_c(s
);
253 if (strcmp(t
->target
.u
.kernel
.target
->name
, XT_ERROR_TARGET
) == 0) {
254 /* Head of user chain: ERROR target with chainname */
255 *chainname
= t
->target
.data
;
260 if (unconditional(s
) &&
261 strcmp(t
->target
.u
.kernel
.target
->name
,
262 XT_STANDARD_TARGET
) == 0 &&
264 /* Tail of chains: STANDARD target (return/policy) */
265 *comment
= *chainname
== hookname
266 ? comments
[NF_IP6_TRACE_COMMENT_POLICY
]
267 : comments
[NF_IP6_TRACE_COMMENT_RETURN
];
276 static void trace_packet(const struct sk_buff
*skb
,
278 const struct net_device
*in
,
279 const struct net_device
*out
,
280 const char *tablename
,
281 const struct xt_table_info
*private,
282 const struct ip6t_entry
*e
)
284 const void *table_base
;
285 const struct ip6t_entry
*root
;
286 const char *hookname
, *chainname
, *comment
;
287 const struct ip6t_entry
*iter
;
288 unsigned int rulenum
= 0;
289 struct net
*net
= dev_net(in
? in
: out
);
291 table_base
= private->entries
[smp_processor_id()];
292 root
= get_entry(table_base
, private->hook_entry
[hook
]);
294 hookname
= chainname
= hooknames
[hook
];
295 comment
= comments
[NF_IP6_TRACE_COMMENT_RULE
];
297 xt_entry_foreach(iter
, root
, private->size
- private->hook_entry
[hook
])
298 if (get_chainname_rulenum(iter
, e
, hookname
,
299 &chainname
, &comment
, &rulenum
) != 0)
302 nf_log_packet(net
, AF_INET6
, hook
, skb
, in
, out
, &trace_loginfo
,
303 "TRACE: %s:%s:%s:%u ",
304 tablename
, chainname
, comment
, rulenum
);
308 static inline __pure
struct ip6t_entry
*
309 ip6t_next_entry(const struct ip6t_entry
*entry
)
311 return (void *)entry
+ entry
->next_offset
;
314 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
316 ip6t_do_table(struct sk_buff
*skb
,
318 const struct net_device
*in
,
319 const struct net_device
*out
,
320 struct xt_table
*table
)
322 static const char nulldevname
[IFNAMSIZ
] __attribute__((aligned(sizeof(long))));
323 /* Initializing verdict to NF_DROP keeps gcc happy. */
324 unsigned int verdict
= NF_DROP
;
325 const char *indev
, *outdev
;
326 const void *table_base
;
327 struct ip6t_entry
*e
, **jumpstack
;
328 unsigned int *stackptr
, origptr
, cpu
;
329 const struct xt_table_info
*private;
330 struct xt_action_param acpar
;
334 indev
= in
? in
->name
: nulldevname
;
335 outdev
= out
? out
->name
: nulldevname
;
336 /* We handle fragments by dealing with the first fragment as
337 * if it was a normal packet. All other fragments are treated
338 * normally, except that they will NEVER match rules that ask
339 * things we don't know, ie. tcp syn flag or ports). If the
340 * rule is also a fragment-specific rule, non-fragments won't
342 acpar
.hotdrop
= false;
345 acpar
.family
= NFPROTO_IPV6
;
346 acpar
.hooknum
= hook
;
348 IP_NF_ASSERT(table
->valid_hooks
& (1 << hook
));
351 addend
= xt_write_recseq_begin();
352 private = table
->private;
353 cpu
= smp_processor_id();
354 table_base
= private->entries
[cpu
];
355 jumpstack
= (struct ip6t_entry
**)private->jumpstack
[cpu
];
356 stackptr
= per_cpu_ptr(private->stackptr
, cpu
);
359 e
= get_entry(table_base
, private->hook_entry
[hook
]);
362 const struct xt_entry_target
*t
;
363 const struct xt_entry_match
*ematch
;
367 if (!ip6_packet_match(skb
, indev
, outdev
, &e
->ipv6
,
368 &acpar
.thoff
, &acpar
.fragoff
, &acpar
.hotdrop
)) {
370 e
= ip6t_next_entry(e
);
374 xt_ematch_foreach(ematch
, e
) {
375 acpar
.match
= ematch
->u
.kernel
.match
;
376 acpar
.matchinfo
= ematch
->data
;
377 if (!acpar
.match
->match(skb
, &acpar
))
381 ADD_COUNTER(e
->counters
, skb
->len
, 1);
383 t
= ip6t_get_target_c(e
);
384 IP_NF_ASSERT(t
->u
.kernel
.target
);
386 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
387 /* The packet is traced: log it */
388 if (unlikely(skb
->nf_trace
))
389 trace_packet(skb
, hook
, in
, out
,
390 table
->name
, private, e
);
392 /* Standard target? */
393 if (!t
->u
.kernel
.target
->target
) {
396 v
= ((struct xt_standard_target
*)t
)->verdict
;
398 /* Pop from stack? */
399 if (v
!= XT_RETURN
) {
400 verdict
= (unsigned int)(-v
) - 1;
403 if (*stackptr
<= origptr
)
404 e
= get_entry(table_base
,
405 private->underflow
[hook
]);
407 e
= ip6t_next_entry(jumpstack
[--*stackptr
]);
410 if (table_base
+ v
!= ip6t_next_entry(e
) &&
411 !(e
->ipv6
.flags
& IP6T_F_GOTO
)) {
412 if (*stackptr
>= private->stacksize
) {
416 jumpstack
[(*stackptr
)++] = e
;
419 e
= get_entry(table_base
, v
);
423 acpar
.target
= t
->u
.kernel
.target
;
424 acpar
.targinfo
= t
->data
;
426 verdict
= t
->u
.kernel
.target
->target(skb
, &acpar
);
427 if (verdict
== XT_CONTINUE
)
428 e
= ip6t_next_entry(e
);
432 } while (!acpar
.hotdrop
);
436 xt_write_recseq_end(addend
);
439 #ifdef DEBUG_ALLOW_ALL
448 /* Figures out from what hook each rule can be called: returns 0 if
449 there are loops. Puts hook bitmask in comefrom. */
451 mark_source_chains(const struct xt_table_info
*newinfo
,
452 unsigned int valid_hooks
, void *entry0
)
456 /* No recursion; use packet counter to save back ptrs (reset
457 to 0 as we leave), and comefrom to save source hook bitmask */
458 for (hook
= 0; hook
< NF_INET_NUMHOOKS
; hook
++) {
459 unsigned int pos
= newinfo
->hook_entry
[hook
];
460 struct ip6t_entry
*e
= (struct ip6t_entry
*)(entry0
+ pos
);
462 if (!(valid_hooks
& (1 << hook
)))
465 /* Set initial back pointer. */
466 e
->counters
.pcnt
= pos
;
469 const struct xt_standard_target
*t
470 = (void *)ip6t_get_target_c(e
);
471 int visited
= e
->comefrom
& (1 << hook
);
473 if (e
->comefrom
& (1 << NF_INET_NUMHOOKS
)) {
474 pr_err("iptables: loop hook %u pos %u %08X.\n",
475 hook
, pos
, e
->comefrom
);
478 e
->comefrom
|= ((1 << hook
) | (1 << NF_INET_NUMHOOKS
));
480 /* Unconditional return/END. */
481 if ((unconditional(e
) &&
482 (strcmp(t
->target
.u
.user
.name
,
483 XT_STANDARD_TARGET
) == 0) &&
484 t
->verdict
< 0) || visited
) {
485 unsigned int oldpos
, size
;
487 if ((strcmp(t
->target
.u
.user
.name
,
488 XT_STANDARD_TARGET
) == 0) &&
489 t
->verdict
< -NF_MAX_VERDICT
- 1) {
490 duprintf("mark_source_chains: bad "
491 "negative verdict (%i)\n",
496 /* Return: backtrack through the last
499 e
->comefrom
^= (1<<NF_INET_NUMHOOKS
);
500 #ifdef DEBUG_IP_FIREWALL_USER
502 & (1 << NF_INET_NUMHOOKS
)) {
503 duprintf("Back unset "
510 pos
= e
->counters
.pcnt
;
511 e
->counters
.pcnt
= 0;
513 /* We're at the start. */
517 e
= (struct ip6t_entry
*)
519 } while (oldpos
== pos
+ e
->next_offset
);
522 size
= e
->next_offset
;
523 e
= (struct ip6t_entry
*)
524 (entry0
+ pos
+ size
);
525 if (pos
+ size
>= newinfo
->size
)
527 e
->counters
.pcnt
= pos
;
530 int newpos
= t
->verdict
;
532 if (strcmp(t
->target
.u
.user
.name
,
533 XT_STANDARD_TARGET
) == 0 &&
535 if (newpos
> newinfo
->size
-
536 sizeof(struct ip6t_entry
)) {
537 duprintf("mark_source_chains: "
538 "bad verdict (%i)\n",
542 /* This a jump; chase it. */
543 duprintf("Jump rule %u -> %u\n",
546 /* ... this is a fallthru */
547 newpos
= pos
+ e
->next_offset
;
548 if (newpos
>= newinfo
->size
)
551 e
= (struct ip6t_entry
*)
553 e
->counters
.pcnt
= pos
;
558 duprintf("Finished chain %u\n", hook
);
563 static void cleanup_match(struct xt_entry_match
*m
, struct net
*net
)
565 struct xt_mtdtor_param par
;
568 par
.match
= m
->u
.kernel
.match
;
569 par
.matchinfo
= m
->data
;
570 par
.family
= NFPROTO_IPV6
;
571 if (par
.match
->destroy
!= NULL
)
572 par
.match
->destroy(&par
);
573 module_put(par
.match
->me
);
577 check_entry(const struct ip6t_entry
*e
)
579 const struct xt_entry_target
*t
;
581 if (!ip6_checkentry(&e
->ipv6
))
584 if (e
->target_offset
+ sizeof(struct xt_entry_target
) >
588 t
= ip6t_get_target_c(e
);
589 if (e
->target_offset
+ t
->u
.target_size
> e
->next_offset
)
595 static int check_match(struct xt_entry_match
*m
, struct xt_mtchk_param
*par
)
597 const struct ip6t_ip6
*ipv6
= par
->entryinfo
;
600 par
->match
= m
->u
.kernel
.match
;
601 par
->matchinfo
= m
->data
;
603 ret
= xt_check_match(par
, m
->u
.match_size
- sizeof(*m
),
604 ipv6
->proto
, ipv6
->invflags
& IP6T_INV_PROTO
);
606 duprintf("ip_tables: check failed for `%s'.\n",
614 find_check_match(struct xt_entry_match
*m
, struct xt_mtchk_param
*par
)
616 struct xt_match
*match
;
619 match
= xt_request_find_match(NFPROTO_IPV6
, m
->u
.user
.name
,
622 duprintf("find_check_match: `%s' not found\n", m
->u
.user
.name
);
623 return PTR_ERR(match
);
625 m
->u
.kernel
.match
= match
;
627 ret
= check_match(m
, par
);
633 module_put(m
->u
.kernel
.match
->me
);
637 static int check_target(struct ip6t_entry
*e
, struct net
*net
, const char *name
)
639 struct xt_entry_target
*t
= ip6t_get_target(e
);
640 struct xt_tgchk_param par
= {
644 .target
= t
->u
.kernel
.target
,
646 .hook_mask
= e
->comefrom
,
647 .family
= NFPROTO_IPV6
,
651 t
= ip6t_get_target(e
);
652 ret
= xt_check_target(&par
, t
->u
.target_size
- sizeof(*t
),
653 e
->ipv6
.proto
, e
->ipv6
.invflags
& IP6T_INV_PROTO
);
655 duprintf("ip_tables: check failed for `%s'.\n",
656 t
->u
.kernel
.target
->name
);
663 find_check_entry(struct ip6t_entry
*e
, struct net
*net
, const char *name
,
666 struct xt_entry_target
*t
;
667 struct xt_target
*target
;
670 struct xt_mtchk_param mtpar
;
671 struct xt_entry_match
*ematch
;
676 mtpar
.entryinfo
= &e
->ipv6
;
677 mtpar
.hook_mask
= e
->comefrom
;
678 mtpar
.family
= NFPROTO_IPV6
;
679 xt_ematch_foreach(ematch
, e
) {
680 ret
= find_check_match(ematch
, &mtpar
);
682 goto cleanup_matches
;
686 t
= ip6t_get_target(e
);
687 target
= xt_request_find_target(NFPROTO_IPV6
, t
->u
.user
.name
,
689 if (IS_ERR(target
)) {
690 duprintf("find_check_entry: `%s' not found\n", t
->u
.user
.name
);
691 ret
= PTR_ERR(target
);
692 goto cleanup_matches
;
694 t
->u
.kernel
.target
= target
;
696 ret
= check_target(e
, net
, name
);
701 module_put(t
->u
.kernel
.target
->me
);
703 xt_ematch_foreach(ematch
, e
) {
706 cleanup_match(ematch
, net
);
711 static bool check_underflow(const struct ip6t_entry
*e
)
713 const struct xt_entry_target
*t
;
714 unsigned int verdict
;
716 if (!unconditional(e
))
718 t
= ip6t_get_target_c(e
);
719 if (strcmp(t
->u
.user
.name
, XT_STANDARD_TARGET
) != 0)
721 verdict
= ((struct xt_standard_target
*)t
)->verdict
;
722 verdict
= -verdict
- 1;
723 return verdict
== NF_DROP
|| verdict
== NF_ACCEPT
;
727 check_entry_size_and_hooks(struct ip6t_entry
*e
,
728 struct xt_table_info
*newinfo
,
729 const unsigned char *base
,
730 const unsigned char *limit
,
731 const unsigned int *hook_entries
,
732 const unsigned int *underflows
,
733 unsigned int valid_hooks
)
738 if ((unsigned long)e
% __alignof__(struct ip6t_entry
) != 0 ||
739 (unsigned char *)e
+ sizeof(struct ip6t_entry
) >= limit
||
740 (unsigned char *)e
+ e
->next_offset
> limit
) {
741 duprintf("Bad offset %p\n", e
);
746 < sizeof(struct ip6t_entry
) + sizeof(struct xt_entry_target
)) {
747 duprintf("checking: element %p size %u\n",
752 err
= check_entry(e
);
757 /* Check hooks & underflows */
758 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
759 if (!(valid_hooks
& (1 << h
)))
761 if ((unsigned char *)e
- base
== hook_entries
[h
])
762 newinfo
->hook_entry
[h
] = hook_entries
[h
];
763 if ((unsigned char *)e
- base
== underflows
[h
]) {
764 if (!check_underflow(e
)) {
765 pr_debug("Underflows must be unconditional and "
766 "use the STANDARD target with "
770 newinfo
->underflow
[h
] = underflows
[h
];
774 /* Clear counters and comefrom */
775 e
->counters
= ((struct xt_counters
) { 0, 0 });
780 static void cleanup_entry(struct ip6t_entry
*e
, struct net
*net
)
782 struct xt_tgdtor_param par
;
783 struct xt_entry_target
*t
;
784 struct xt_entry_match
*ematch
;
786 /* Cleanup all matches */
787 xt_ematch_foreach(ematch
, e
)
788 cleanup_match(ematch
, net
);
789 t
= ip6t_get_target(e
);
792 par
.target
= t
->u
.kernel
.target
;
793 par
.targinfo
= t
->data
;
794 par
.family
= NFPROTO_IPV6
;
795 if (par
.target
->destroy
!= NULL
)
796 par
.target
->destroy(&par
);
797 module_put(par
.target
->me
);
800 /* Checks and translates the user-supplied table segment (held in
803 translate_table(struct net
*net
, struct xt_table_info
*newinfo
, void *entry0
,
804 const struct ip6t_replace
*repl
)
806 struct ip6t_entry
*iter
;
810 newinfo
->size
= repl
->size
;
811 newinfo
->number
= repl
->num_entries
;
813 /* Init all hooks to impossible value. */
814 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
815 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
816 newinfo
->underflow
[i
] = 0xFFFFFFFF;
819 duprintf("translate_table: size %u\n", newinfo
->size
);
821 /* Walk through entries, checking offsets. */
822 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
823 ret
= check_entry_size_and_hooks(iter
, newinfo
, entry0
,
831 if (strcmp(ip6t_get_target(iter
)->u
.user
.name
,
832 XT_ERROR_TARGET
) == 0)
833 ++newinfo
->stacksize
;
836 if (i
!= repl
->num_entries
) {
837 duprintf("translate_table: %u not %u entries\n",
838 i
, repl
->num_entries
);
842 /* Check hooks all assigned */
843 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
844 /* Only hooks which are valid */
845 if (!(repl
->valid_hooks
& (1 << i
)))
847 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
848 duprintf("Invalid hook entry %u %u\n",
849 i
, repl
->hook_entry
[i
]);
852 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
853 duprintf("Invalid underflow %u %u\n",
854 i
, repl
->underflow
[i
]);
859 if (!mark_source_chains(newinfo
, repl
->valid_hooks
, entry0
))
862 /* Finally, each sanity check must pass */
864 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
865 ret
= find_check_entry(iter
, net
, repl
->name
, repl
->size
);
872 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
875 cleanup_entry(iter
, net
);
880 /* And one copy for every other CPU */
881 for_each_possible_cpu(i
) {
882 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry0
)
883 memcpy(newinfo
->entries
[i
], entry0
, newinfo
->size
);
890 get_counters(const struct xt_table_info
*t
,
891 struct xt_counters counters
[])
893 struct ip6t_entry
*iter
;
897 for_each_possible_cpu(cpu
) {
898 seqcount_t
*s
= &per_cpu(xt_recseq
, cpu
);
901 xt_entry_foreach(iter
, t
->entries
[cpu
], t
->size
) {
906 start
= read_seqcount_begin(s
);
907 bcnt
= iter
->counters
.bcnt
;
908 pcnt
= iter
->counters
.pcnt
;
909 } while (read_seqcount_retry(s
, start
));
911 ADD_COUNTER(counters
[i
], bcnt
, pcnt
);
917 static struct xt_counters
*alloc_counters(const struct xt_table
*table
)
919 unsigned int countersize
;
920 struct xt_counters
*counters
;
921 const struct xt_table_info
*private = table
->private;
923 /* We need atomic snapshot of counters: rest doesn't change
924 (other than comefrom, which userspace doesn't care
926 countersize
= sizeof(struct xt_counters
) * private->number
;
927 counters
= vzalloc(countersize
);
929 if (counters
== NULL
)
930 return ERR_PTR(-ENOMEM
);
932 get_counters(private, counters
);
938 copy_entries_to_user(unsigned int total_size
,
939 const struct xt_table
*table
,
940 void __user
*userptr
)
942 unsigned int off
, num
;
943 const struct ip6t_entry
*e
;
944 struct xt_counters
*counters
;
945 const struct xt_table_info
*private = table
->private;
947 const void *loc_cpu_entry
;
949 counters
= alloc_counters(table
);
950 if (IS_ERR(counters
))
951 return PTR_ERR(counters
);
953 /* choose the copy that is on our node/cpu, ...
954 * This choice is lazy (because current thread is
955 * allowed to migrate to another cpu)
957 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
958 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
963 /* FIXME: use iterator macros --RR */
964 /* ... then go back and fix counters and names */
965 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
967 const struct xt_entry_match
*m
;
968 const struct xt_entry_target
*t
;
970 e
= (struct ip6t_entry
*)(loc_cpu_entry
+ off
);
971 if (copy_to_user(userptr
+ off
972 + offsetof(struct ip6t_entry
, counters
),
974 sizeof(counters
[num
])) != 0) {
979 for (i
= sizeof(struct ip6t_entry
);
980 i
< e
->target_offset
;
981 i
+= m
->u
.match_size
) {
984 if (copy_to_user(userptr
+ off
+ i
985 + offsetof(struct xt_entry_match
,
987 m
->u
.kernel
.match
->name
,
988 strlen(m
->u
.kernel
.match
->name
)+1)
995 t
= ip6t_get_target_c(e
);
996 if (copy_to_user(userptr
+ off
+ e
->target_offset
997 + offsetof(struct xt_entry_target
,
999 t
->u
.kernel
.target
->name
,
1000 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
1011 #ifdef CONFIG_COMPAT
1012 static void compat_standard_from_user(void *dst
, const void *src
)
1014 int v
= *(compat_int_t
*)src
;
1017 v
+= xt_compat_calc_jump(AF_INET6
, v
);
1018 memcpy(dst
, &v
, sizeof(v
));
1021 static int compat_standard_to_user(void __user
*dst
, const void *src
)
1023 compat_int_t cv
= *(int *)src
;
1026 cv
-= xt_compat_calc_jump(AF_INET6
, cv
);
1027 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
1030 static int compat_calc_entry(const struct ip6t_entry
*e
,
1031 const struct xt_table_info
*info
,
1032 const void *base
, struct xt_table_info
*newinfo
)
1034 const struct xt_entry_match
*ematch
;
1035 const struct xt_entry_target
*t
;
1036 unsigned int entry_offset
;
1039 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1040 entry_offset
= (void *)e
- base
;
1041 xt_ematch_foreach(ematch
, e
)
1042 off
+= xt_compat_match_offset(ematch
->u
.kernel
.match
);
1043 t
= ip6t_get_target_c(e
);
1044 off
+= xt_compat_target_offset(t
->u
.kernel
.target
);
1045 newinfo
->size
-= off
;
1046 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1050 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1051 if (info
->hook_entry
[i
] &&
1052 (e
< (struct ip6t_entry
*)(base
+ info
->hook_entry
[i
])))
1053 newinfo
->hook_entry
[i
] -= off
;
1054 if (info
->underflow
[i
] &&
1055 (e
< (struct ip6t_entry
*)(base
+ info
->underflow
[i
])))
1056 newinfo
->underflow
[i
] -= off
;
1061 static int compat_table_info(const struct xt_table_info
*info
,
1062 struct xt_table_info
*newinfo
)
1064 struct ip6t_entry
*iter
;
1065 void *loc_cpu_entry
;
1068 if (!newinfo
|| !info
)
1071 /* we dont care about newinfo->entries[] */
1072 memcpy(newinfo
, info
, offsetof(struct xt_table_info
, entries
));
1073 newinfo
->initial_entries
= 0;
1074 loc_cpu_entry
= info
->entries
[raw_smp_processor_id()];
1075 xt_compat_init_offsets(AF_INET6
, info
->number
);
1076 xt_entry_foreach(iter
, loc_cpu_entry
, info
->size
) {
1077 ret
= compat_calc_entry(iter
, info
, loc_cpu_entry
, newinfo
);
1085 static int get_info(struct net
*net
, void __user
*user
,
1086 const int *len
, int compat
)
1088 char name
[XT_TABLE_MAXNAMELEN
];
1092 if (*len
!= sizeof(struct ip6t_getinfo
)) {
1093 duprintf("length %u != %zu\n", *len
,
1094 sizeof(struct ip6t_getinfo
));
1098 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
1101 name
[XT_TABLE_MAXNAMELEN
-1] = '\0';
1102 #ifdef CONFIG_COMPAT
1104 xt_compat_lock(AF_INET6
);
1106 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1107 "ip6table_%s", name
);
1108 if (!IS_ERR_OR_NULL(t
)) {
1109 struct ip6t_getinfo info
;
1110 const struct xt_table_info
*private = t
->private;
1111 #ifdef CONFIG_COMPAT
1112 struct xt_table_info tmp
;
1115 ret
= compat_table_info(private, &tmp
);
1116 xt_compat_flush_offsets(AF_INET6
);
1120 memset(&info
, 0, sizeof(info
));
1121 info
.valid_hooks
= t
->valid_hooks
;
1122 memcpy(info
.hook_entry
, private->hook_entry
,
1123 sizeof(info
.hook_entry
));
1124 memcpy(info
.underflow
, private->underflow
,
1125 sizeof(info
.underflow
));
1126 info
.num_entries
= private->number
;
1127 info
.size
= private->size
;
1128 strcpy(info
.name
, name
);
1130 if (copy_to_user(user
, &info
, *len
) != 0)
1138 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1139 #ifdef CONFIG_COMPAT
1141 xt_compat_unlock(AF_INET6
);
1147 get_entries(struct net
*net
, struct ip6t_get_entries __user
*uptr
,
1151 struct ip6t_get_entries get
;
1154 if (*len
< sizeof(get
)) {
1155 duprintf("get_entries: %u < %zu\n", *len
, sizeof(get
));
1158 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1160 if (*len
!= sizeof(struct ip6t_get_entries
) + get
.size
) {
1161 duprintf("get_entries: %u != %zu\n",
1162 *len
, sizeof(get
) + get
.size
);
1166 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1167 if (!IS_ERR_OR_NULL(t
)) {
1168 struct xt_table_info
*private = t
->private;
1169 duprintf("t->private->number = %u\n", private->number
);
1170 if (get
.size
== private->size
)
1171 ret
= copy_entries_to_user(private->size
,
1172 t
, uptr
->entrytable
);
1174 duprintf("get_entries: I've got %u not %u!\n",
1175 private->size
, get
.size
);
1181 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1187 __do_replace(struct net
*net
, const char *name
, unsigned int valid_hooks
,
1188 struct xt_table_info
*newinfo
, unsigned int num_counters
,
1189 void __user
*counters_ptr
)
1193 struct xt_table_info
*oldinfo
;
1194 struct xt_counters
*counters
;
1195 const void *loc_cpu_old_entry
;
1196 struct ip6t_entry
*iter
;
1199 counters
= vzalloc(num_counters
* sizeof(struct xt_counters
));
1205 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1206 "ip6table_%s", name
);
1207 if (IS_ERR_OR_NULL(t
)) {
1208 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1209 goto free_newinfo_counters_untrans
;
1213 if (valid_hooks
!= t
->valid_hooks
) {
1214 duprintf("Valid hook crap: %08X vs %08X\n",
1215 valid_hooks
, t
->valid_hooks
);
1220 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
1224 /* Update module usage count based on number of rules */
1225 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1226 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
1227 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
1228 (newinfo
->number
<= oldinfo
->initial_entries
))
1230 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
1231 (newinfo
->number
<= oldinfo
->initial_entries
))
1234 /* Get the old counters, and synchronize with replace */
1235 get_counters(oldinfo
, counters
);
1237 /* Decrease module usage counts and free resource */
1238 loc_cpu_old_entry
= oldinfo
->entries
[raw_smp_processor_id()];
1239 xt_entry_foreach(iter
, loc_cpu_old_entry
, oldinfo
->size
)
1240 cleanup_entry(iter
, net
);
1242 xt_free_table_info(oldinfo
);
1243 if (copy_to_user(counters_ptr
, counters
,
1244 sizeof(struct xt_counters
) * num_counters
) != 0) {
1245 /* Silent error, can't fail, new table is already in place */
1246 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1255 free_newinfo_counters_untrans
:
1262 do_replace(struct net
*net
, const void __user
*user
, unsigned int len
)
1265 struct ip6t_replace tmp
;
1266 struct xt_table_info
*newinfo
;
1267 void *loc_cpu_entry
;
1268 struct ip6t_entry
*iter
;
1270 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1273 /* overflow check */
1274 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1276 if (tmp
.num_counters
== 0)
1279 tmp
.name
[sizeof(tmp
.name
)-1] = 0;
1281 newinfo
= xt_alloc_table_info(tmp
.size
);
1285 /* choose the copy that is on our node/cpu */
1286 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1287 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1293 ret
= translate_table(net
, newinfo
, loc_cpu_entry
, &tmp
);
1297 duprintf("ip_tables: Translated table\n");
1299 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1300 tmp
.num_counters
, tmp
.counters
);
1302 goto free_newinfo_untrans
;
1305 free_newinfo_untrans
:
1306 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1307 cleanup_entry(iter
, net
);
1309 xt_free_table_info(newinfo
);
1314 do_add_counters(struct net
*net
, const void __user
*user
, unsigned int len
,
1317 unsigned int i
, curcpu
;
1318 struct xt_counters_info tmp
;
1319 struct xt_counters
*paddc
;
1321 const struct xt_table_info
*private;
1323 const void *loc_cpu_entry
;
1324 struct ip6t_entry
*iter
;
1325 unsigned int addend
;
1327 paddc
= xt_copy_counters_from_user(user
, len
, &tmp
, compat
);
1329 return PTR_ERR(paddc
);
1330 t
= xt_find_table_lock(net
, AF_INET6
, tmp
.name
);
1331 if (IS_ERR_OR_NULL(t
)) {
1332 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1338 private = t
->private;
1339 if (private->number
!= tmp
.num_counters
) {
1341 goto unlock_up_free
;
1345 /* Choose the copy that is on our node */
1346 curcpu
= smp_processor_id();
1347 addend
= xt_write_recseq_begin();
1348 loc_cpu_entry
= private->entries
[curcpu
];
1349 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
) {
1350 ADD_COUNTER(iter
->counters
, paddc
[i
].bcnt
, paddc
[i
].pcnt
);
1353 xt_write_recseq_end(addend
);
1365 #ifdef CONFIG_COMPAT
1366 struct compat_ip6t_replace
{
1367 char name
[XT_TABLE_MAXNAMELEN
];
1371 u32 hook_entry
[NF_INET_NUMHOOKS
];
1372 u32 underflow
[NF_INET_NUMHOOKS
];
1374 compat_uptr_t counters
; /* struct xt_counters * */
1375 struct compat_ip6t_entry entries
[0];
/* compat_copy_entry_to_user(): serialize one kernel ip6t_entry (plus its
 * matches and target) into the compat (32-bit) layout at *dstptr in the
 * user buffer, advancing *dstptr and shrinking *size by the native/compat
 * size delta. counters[i] supplies the entry's counter snapshot.
 * NOTE(review): extraction dropped intermediate lines (error returns,
 * closing braces); verify against upstream before editing.
 */
1379 compat_copy_entry_to_user(struct ip6t_entry
*e
, void __user
**dstptr
,
1380 unsigned int *size
, struct xt_counters
*counters
,
1383 struct xt_entry_target
*t
;
1384 struct compat_ip6t_entry __user
*ce
;
1385 u_int16_t target_offset
, next_offset
;
1386 compat_uint_t origsize
;
1387 const struct xt_entry_match
*ematch
;
/* Raw header copy first; offsets are patched below once the real compat
 * sizes of matches/target are known. */
1391 ce
= (struct compat_ip6t_entry __user
*)*dstptr
;
1392 if (copy_to_user(ce
, e
, sizeof(struct ip6t_entry
)) != 0 ||
1393 copy_to_user(&ce
->counters
, &counters
[i
],
1394 sizeof(counters
[i
])) != 0)
1397 *dstptr
+= sizeof(struct compat_ip6t_entry
);
1398 *size
-= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
/* Each match converts itself to compat layout and advances dstptr/size. */
1400 xt_ematch_foreach(ematch
, e
) {
1401 ret
= xt_compat_match_to_user(ematch
, dstptr
, size
);
/* (origsize - *size) is the total shrinkage so far; subtract it from the
 * native offsets to get the compat offsets. */
1405 target_offset
= e
->target_offset
- (origsize
- *size
);
1406 t
= ip6t_get_target(e
);
1407 ret
= xt_compat_target_to_user(t
, dstptr
, size
);
1410 next_offset
= e
->next_offset
- (origsize
- *size
);
1411 if (put_user(target_offset
, &ce
->target_offset
) != 0 ||
1412 put_user(next_offset
, &ce
->next_offset
) != 0)
/* compat_find_calc_match(): resolve a match by user-supplied name/revision
 * (loading its module if needed), bind it into m->u.kernel.match, and grow
 * *size by the native-vs-compat size difference for this match.
 * Returns 0 or a negative errno (PTR_ERR of the lookup).
 */
1418 compat_find_calc_match(struct xt_entry_match
*m
,
1419 const struct ip6t_ip6
*ipv6
,
1420 unsigned int hookmask
,
1423 struct xt_match
*match
;
1425 match
= xt_request_find_match(NFPROTO_IPV6
, m
->u
.user
.name
,
1426 m
->u
.user
.revision
);
1427 if (IS_ERR(match
)) {
1428 duprintf("compat_check_calc_match: `%s' not found\n",
1430 return PTR_ERR(match
);
/* Successful lookup took a module reference; record the kernel-side match
 * and account for its compat size delta. */
1432 m
->u
.kernel
.match
= match
;
1433 *size
+= xt_compat_match_offset(match
);
1437 static void compat_release_entry(struct compat_ip6t_entry
*e
)
1439 struct xt_entry_target
*t
;
1440 struct xt_entry_match
*ematch
;
1442 /* Cleanup all matches */
1443 xt_ematch_foreach(ematch
, e
)
1444 module_put(ematch
->u
.kernel
.match
->me
);
1445 t
= compat_ip6t_get_target(e
);
1446 module_put(t
->u
.kernel
.target
->me
);
/* check_compat_entry_size_and_hooks(): validate one compat entry's bounds
 * and alignment, resolve its matches and target (taking module refs), and
 * register the native-vs-compat size delta for this entry's offset with
 * xt_compat_add_offset(). On failure the refs taken so far are dropped
 * (release_matches / out paths).
 * NOTE(review): extraction dropped error-return lines and braces between
 * the numbered fragments; compare with upstream before editing.
 */
1450 check_compat_entry_size_and_hooks(struct compat_ip6t_entry
*e
,
1451 struct xt_table_info
*newinfo
,
1453 const unsigned char *base
,
1454 const unsigned char *limit
)
1456 struct xt_entry_match
*ematch
;
1457 struct xt_entry_target
*t
;
1458 struct xt_target
*target
;
1459 unsigned int entry_offset
;
1463 duprintf("check_compat_entry_size_and_hooks %p\n", e
);
/* Entry must be aligned and fully inside the blob (header and whole
 * next_offset span) — this is untrusted userspace data. */
1464 if ((unsigned long)e
% __alignof__(struct compat_ip6t_entry
) != 0 ||
1465 (unsigned char *)e
+ sizeof(struct compat_ip6t_entry
) >= limit
||
1466 (unsigned char *)e
+ e
->next_offset
> limit
) {
1467 duprintf("Bad offset %p, limit = %p\n", e
, limit
);
/* An entry must at least hold its own header plus a target header. */
1471 if (e
->next_offset
< sizeof(struct compat_ip6t_entry
) +
1472 sizeof(struct compat_xt_entry_target
)) {
1473 duprintf("checking: element %p size %u\n",
1478 /* For purposes of check_entry casting the compat entry is fine */
1479 ret
= check_entry((struct ip6t_entry
*)e
);
/* Accumulate the size growth when converting this entry to native layout. */
1484 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1485 entry_offset
= (void *)e
- (void *)base
;
1487 xt_ematch_foreach(ematch
, e
) {
1488 ret
= compat_find_calc_match(ematch
, &e
->ipv6
, e
->comefrom
,
1491 goto release_matches
;
1495 t
= compat_ip6t_get_target(e
);
1496 target
= xt_request_find_target(NFPROTO_IPV6
, t
->u
.user
.name
,
1497 t
->u
.user
.revision
);
1498 if (IS_ERR(target
)) {
1499 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1501 ret
= PTR_ERR(target
);
1502 goto release_matches
;
1504 t
->u
.kernel
.target
= target
;
1506 off
+= xt_compat_target_offset(target
);
1508 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
/* Error unwind: drop the target ref, then the refs of matches bound so
 * far (loop releases only those already resolved). */
1515 module_put(t
->u
.kernel
.target
->me
);
1517 xt_ematch_foreach(ematch
, e
) {
1520 module_put(ematch
->u
.kernel
.match
->me
);
/* compat_copy_entry_from_user(): expand one validated compat entry into
 * native ip6t_entry layout at *dstptr, converting matches and target and
 * fixing up target_offset/next_offset plus the table's hook_entry/underflow
 * positions that lie past this entry.
 * NOTE(review): extraction dropped some lines (origsize init, braces);
 * verify against upstream before editing.
 */
1526 compat_copy_entry_from_user(struct compat_ip6t_entry
*e
, void **dstptr
,
1528 struct xt_table_info
*newinfo
, unsigned char *base
)
1530 struct xt_entry_target
*t
;
1531 struct ip6t_entry
*de
;
1532 unsigned int origsize
;
1534 struct xt_entry_match
*ematch
;
1537 de
= (struct ip6t_entry
*)*dstptr
;
1538 memcpy(de
, e
, sizeof(struct ip6t_entry
));
1539 memcpy(&de
->counters
, &e
->counters
, sizeof(e
->counters
));
1541 *dstptr
+= sizeof(struct ip6t_entry
);
/* Converting compat -> native grows the entry; track the growth in *size. */
1542 *size
+= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1544 xt_ematch_foreach(ematch
, e
)
1545 xt_compat_match_from_user(ematch
, dstptr
, size
);
/* (origsize - *size) is negative growth; adjusting the offsets keeps them
 * pointing at the target / next entry in the expanded layout. */
1547 de
->target_offset
= e
->target_offset
- (origsize
- *size
);
1548 t
= compat_ip6t_get_target(e
);
1549 xt_compat_target_from_user(t
, dstptr
, size
);
1551 de
->next_offset
= e
->next_offset
- (origsize
- *size
);
/* Shift every hook entry point / underflow that refers to data at or after
 * this entry by the same growth amount. */
1552 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1553 if ((unsigned char *)de
- base
< newinfo
->hook_entry
[h
])
1554 newinfo
->hook_entry
[h
] -= origsize
- *size
;
1555 if ((unsigned char *)de
- base
< newinfo
->underflow
[h
])
1556 newinfo
->underflow
[h
] -= origsize
- *size
;
/* translate_compat_table(): convert a whole compat (32-bit) ruleset blob
 * into a native xt_table_info. Two passes under xt_compat_lock: pass 1
 * validates every compat entry and records per-entry size deltas; pass 2
 * copies each entry into a freshly allocated native table. The result is
 * then fed to the regular translate_table() via a synthesized ip6t_replace.
 * On success *pinfo/*pentry0 are swapped to the new table and the old info
 * is freed; on failure all module refs are released.
 * NOTE(review): extraction dropped many lines here (allocations, j/i init,
 * error labels); the numbered gaps mark them. Compare with upstream.
 */
1561 translate_compat_table(struct net
*net
,
1562 struct xt_table_info
**pinfo
,
1564 const struct compat_ip6t_replace
*compatr
)
1567 struct xt_table_info
*newinfo
, *info
;
1568 void *pos
, *entry0
, *entry1
;
1569 struct compat_ip6t_entry
*iter0
;
1570 struct ip6t_replace repl
;
1576 size
= compatr
->size
;
1577 info
->number
= compatr
->num_entries
;
1579 duprintf("translate_compat_table: size %u\n", info
->size
);
1581 xt_compat_lock(AF_INET6
);
1582 xt_compat_init_offsets(AF_INET6
, compatr
->num_entries
);
1583 /* Walk through entries, checking offsets. */
1584 xt_entry_foreach(iter0
, entry0
, compatr
->size
) {
1585 ret
= check_compat_entry_size_and_hooks(iter0
, info
, &size
,
1587 entry0
+ compatr
->size
);
/* Every entry must have been visited exactly once or the blob lied about
 * num_entries. */
1594 if (j
!= compatr
->num_entries
) {
1595 duprintf("translate_compat_table: %u not %u entries\n",
1596 j
, compatr
->num_entries
);
1601 newinfo
= xt_alloc_table_info(size
);
1605 newinfo
->number
= compatr
->num_entries
;
1606 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1607 newinfo
->hook_entry
[i
] = compatr
->hook_entry
[i
];
1608 newinfo
->underflow
[i
] = compatr
->underflow
[i
];
1610 entry1
= newinfo
->entries
[raw_smp_processor_id()];
1612 size
= compatr
->size
;
1613 xt_entry_foreach(iter0
, entry0
, compatr
->size
)
1614 compat_copy_entry_from_user(iter0
, &pos
, &size
,
1617 /* all module references in entry0 are now gone. */
1618 xt_compat_flush_offsets(AF_INET6
);
1619 xt_compat_unlock(AF_INET6
);
/* Build a native replace header so the normal translate_table() can do the
 * full hook/underflow/jump validation on the converted blob. */
1621 memcpy(&repl
, compatr
, sizeof(*compatr
));
1623 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1624 repl
.hook_entry
[i
] = newinfo
->hook_entry
[i
];
1625 repl
.underflow
[i
] = newinfo
->underflow
[i
];
1628 repl
.num_counters
= 0;
1629 repl
.counters
= NULL
;
1630 repl
.size
= newinfo
->size
;
1631 ret
= translate_table(net
, newinfo
, entry1
, &repl
);
1637 xt_free_table_info(info
);
1641 xt_free_table_info(newinfo
);
/* Error unwind for pass-1 failures: flush offsets, unlock, then release
 * the module refs each already-checked compat entry holds. */
1644 xt_compat_flush_offsets(AF_INET6
);
1645 xt_compat_unlock(AF_INET6
);
1646 xt_entry_foreach(iter0
, entry0
, compatr
->size
) {
1649 compat_release_entry(iter0
);
/* compat_do_replace(): IP6T_SO_SET_REPLACE handler for 32-bit callers.
 * Copies the compat replace header + rule blob from userspace, sanity-checks
 * the sizes against overflow, translates the blob to native layout, and
 * hands it to __do_replace(). On failure after translation the new entries
 * are cleaned up and the table info freed.
 * NOTE(review): extraction dropped return statements and braces between the
 * numbered fragments; compare with upstream before editing.
 */
1655 compat_do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1658 struct compat_ip6t_replace tmp
;
1659 struct xt_table_info
*newinfo
;
1660 void *loc_cpu_entry
;
1661 struct ip6t_entry
*iter
;
1663 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1666 /* overflow check */
1667 if (tmp
.size
>= INT_MAX
/ num_possible_cpus())
1669 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1671 if (tmp
.num_counters
== 0)
/* Force NUL termination of the untrusted table name. */
1674 tmp
.name
[sizeof(tmp
.name
)-1] = 0;
1676 newinfo
= xt_alloc_table_info(tmp
.size
);
1680 /* choose the copy that is on our node/cpu */
1681 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1682 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1688 ret
= translate_compat_table(net
, &newinfo
, &loc_cpu_entry
, &tmp
);
1692 duprintf("compat_do_replace: Translated table\n");
1694 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1695 tmp
.num_counters
, compat_ptr(tmp
.counters
));
1697 goto free_newinfo_untrans
;
1700 free_newinfo_untrans
:
1701 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1702 cleanup_entry(iter
, net
);
1704 xt_free_table_info(newinfo
);
/* compat_do_ip6t_set_ctl(): compat setsockopt dispatcher. Requires
 * CAP_NET_ADMIN in the socket's user namespace, then routes REPLACE to
 * compat_do_replace() and ADD_COUNTERS to do_add_counters() with the
 * compat flag set.
 */
1709 compat_do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1714 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
1718 case IP6T_SO_SET_REPLACE
:
1719 ret
= compat_do_replace(sock_net(sk
), user
, len
);
1722 case IP6T_SO_SET_ADD_COUNTERS
:
/* Final argument 1 == "userspace data is in compat layout". */
1723 ret
= do_add_counters(sock_net(sk
), user
, len
, 1);
1727 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
/* 32-bit layout of the IP6T_SO_GET_ENTRIES request/reply: table name in,
 * rule blob (entrytable, legacy [0] flexible array) out. */
1734 struct compat_ip6t_get_entries
{
1735 char name
[XT_TABLE_MAXNAMELEN
];
1737 struct compat_ip6t_entry entrytable
[0];
/* compat_copy_entries_to_user(): dump a whole table's entries to a 32-bit
 * caller. Snapshots counters via alloc_counters(), then converts each entry
 * with compat_copy_entry_to_user(), passing the running index i for the
 * counter pairing.
 */
1741 compat_copy_entries_to_user(unsigned int total_size
, struct xt_table
*table
,
1742 void __user
*userptr
)
1744 struct xt_counters
*counters
;
1745 const struct xt_table_info
*private = table
->private;
1749 const void *loc_cpu_entry
;
1751 struct ip6t_entry
*iter
;
1753 counters
= alloc_counters(table
);
1754 if (IS_ERR(counters
))
1755 return PTR_ERR(counters
);
1757 /* choose the copy that is on our node/cpu, ...
1758 * This choice is lazy (because current thread is
1759 * allowed to migrate to another cpu)
1761 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1764 xt_entry_foreach(iter
, loc_cpu_entry
, total_size
) {
1765 ret
= compat_copy_entry_to_user(iter
, &pos
,
1766 &size
, counters
, i
++);
/* compat_get_entries(): IP6T_SO_GET_ENTRIES handler for 32-bit callers.
 * Validates the request header and declared size, locks the compat layer
 * and the table, checks the caller's expected compat size against
 * compat_table_info(), then dumps the entries.
 * NOTE(review): extraction dropped return statements and braces; the
 * numbered gaps (1785, 1789, ...) mark missing lines.
 */
1776 compat_get_entries(struct net
*net
, struct compat_ip6t_get_entries __user
*uptr
,
1780 struct compat_ip6t_get_entries get
;
1783 if (*len
< sizeof(get
)) {
1784 duprintf("compat_get_entries: %u < %zu\n", *len
, sizeof(get
));
1788 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
/* The caller must declare exactly header + blob; anything else is a
 * malformed request. */
1791 if (*len
!= sizeof(struct compat_ip6t_get_entries
) + get
.size
) {
1792 duprintf("compat_get_entries: %u != %zu\n",
1793 *len
, sizeof(get
) + get
.size
);
1797 xt_compat_lock(AF_INET6
);
1798 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1799 if (!IS_ERR_OR_NULL(t
)) {
1800 const struct xt_table_info
*private = t
->private;
1801 struct xt_table_info info
;
1802 duprintf("t->private->number = %u\n", private->number
);
1803 ret
= compat_table_info(private, &info
);
1804 if (!ret
&& get
.size
== info
.size
) {
1805 ret
= compat_copy_entries_to_user(private->size
,
1806 t
, uptr
->entrytable
);
1808 duprintf("compat_get_entries: I've got %u not %u!\n",
1809 private->size
, get
.size
);
1812 xt_compat_flush_offsets(AF_INET6
);
1816 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1818 xt_compat_unlock(AF_INET6
);
/* Forward declaration: the compat get handler below falls back to the
 * native handler for commands it does not translate. */
1822 static int do_ip6t_get_ctl(struct sock
*, int, void __user
*, int *);
/* compat_do_ip6t_get_ctl(): compat getsockopt dispatcher. Requires
 * CAP_NET_ADMIN; GET_INFO and GET_ENTRIES take the compat path, everything
 * else (e.g. revision queries, which are layout-identical) falls through to
 * the native do_ip6t_get_ctl().
 */
1825 compat_do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1829 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
1833 case IP6T_SO_GET_INFO
:
/* Final argument 1 == "report sizes in compat layout". */
1834 ret
= get_info(sock_net(sk
), user
, len
, 1);
1836 case IP6T_SO_GET_ENTRIES
:
1837 ret
= compat_get_entries(sock_net(sk
), user
, len
);
1840 ret
= do_ip6t_get_ctl(sk
, cmd
, user
, len
);
/* do_ip6t_set_ctl(): native setsockopt dispatcher. Requires CAP_NET_ADMIN
 * in the socket's user namespace; REPLACE installs a new ruleset,
 * ADD_COUNTERS adds user deltas (compat flag 0 = native layout).
 */
1847 do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
, unsigned int len
)
1851 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
1855 case IP6T_SO_SET_REPLACE
:
1856 ret
= do_replace(sock_net(sk
), user
, len
);
1859 case IP6T_SO_SET_ADD_COUNTERS
:
1860 ret
= do_add_counters(sock_net(sk
), user
, len
, 0);
1864 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
/* do_ip6t_get_ctl(): native getsockopt dispatcher. Requires CAP_NET_ADMIN.
 * Handles GET_INFO, GET_ENTRIES, and match/target revision queries (the
 * latter may autoload an "ip6t_<name>" module via try_then_request_module).
 * NOTE(review): extraction dropped error returns, break statements and the
 * target=1/0 selection around line 1903; compare with upstream.
 */
1872 do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1876 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
1880 case IP6T_SO_GET_INFO
:
1881 ret
= get_info(sock_net(sk
), user
, len
, 0);
1884 case IP6T_SO_GET_ENTRIES
:
1885 ret
= get_entries(sock_net(sk
), user
, len
);
1888 case IP6T_SO_GET_REVISION_MATCH
:
1889 case IP6T_SO_GET_REVISION_TARGET
: {
1890 struct xt_get_revision rev
;
1893 if (*len
!= sizeof(rev
)) {
1897 if (copy_from_user(&rev
, user
, sizeof(rev
)) != 0) {
/* Force NUL termination of the untrusted extension name. */
1901 rev
.name
[sizeof(rev
.name
)-1] = 0;
1903 if (cmd
== IP6T_SO_GET_REVISION_TARGET
)
1908 try_then_request_module(xt_find_revision(AF_INET6
, rev
.name
,
1911 "ip6t_%s", rev
.name
);
1916 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd
);
/* ip6t_register_table(): allocate a table info sized for the initial
 * ruleset, copy and translate the template entries, then register the
 * table with the x_tables core. Returns the registered xt_table or an
 * ERR_PTR; newinfo is freed on any failure path.
 */
1923 struct xt_table
*ip6t_register_table(struct net
*net
,
1924 const struct xt_table
*table
,
1925 const struct ip6t_replace
*repl
)
1928 struct xt_table_info
*newinfo
;
1929 struct xt_table_info bootstrap
= {0};
1930 void *loc_cpu_entry
;
1931 struct xt_table
*new_table
;
1933 newinfo
= xt_alloc_table_info(repl
->size
);
1939 /* choose the copy on our node/cpu, but dont care about preemption */
1940 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1941 memcpy(loc_cpu_entry
, repl
->entries
, repl
->size
);
1943 ret
= translate_table(net
, newinfo
, loc_cpu_entry
, repl
);
1947 new_table
= xt_register_table(net
, table
, &bootstrap
, newinfo
);
1948 if (IS_ERR(new_table
)) {
1949 ret
= PTR_ERR(new_table
);
1955 xt_free_table_info(newinfo
);
1957 return ERR_PTR(ret
);
/* ip6t_unregister_table(): pull the table out of the x_tables core, run
 * cleanup_entry() on every rule (dropping match/target module refs), drop
 * the table owner's module ref if user rules were added beyond the
 * built-in initial entries, and free the table info.
 */
1960 void ip6t_unregister_table(struct net
*net
, struct xt_table
*table
)
1962 struct xt_table_info
*private;
1963 void *loc_cpu_entry
;
/* table->me must be read before xt_unregister_table() frees access paths. */
1964 struct module
*table_owner
= table
->me
;
1965 struct ip6t_entry
*iter
;
1967 private = xt_unregister_table(table
);
1969 /* Decrease module usage counts and free resources */
1970 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1971 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
)
1972 cleanup_entry(iter
, net
);
1973 if (private->number
> private->initial_entries
)
1974 module_put(table_owner
);
1975 xt_free_table_info(private);
/* Returns true if the ICMPv6 (type, code) pair is matched by the rule's
 * range — type must equal test_type and code must lie in
 * [min_code, max_code] — with the result flipped when @invert is set.
 * (Reconstructed formatting; the trailing "^ invert" was lost in the
 * mangled extraction of this chunk.)
 */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool in_range;

	in_range = type == test_type &&
		   code >= min_code &&
		   code <= max_code;
	return in_range != invert;
}
/* icmp6_match(): per-packet match callback for the built-in icmp6 match.
 * Rejects fragments, pulls the ICMPv6 header from the skb (hotdropping
 * truncated packets we were asked to inspect), then compares type/code
 * against the rule's configured range with optional inversion.
 * NOTE(review): extraction dropped the "return false" / NULL-check lines
 * between fragments; compare with upstream.
 */
1989 icmp6_match(const struct sk_buff
*skb
, struct xt_action_param
*par
)
1991 const struct icmp6hdr
*ic
;
1992 struct icmp6hdr _icmph
;
1993 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
1995 /* Must not be a fragment. */
1996 if (par
->fragoff
!= 0)
1999 ic
= skb_header_pointer(skb
, par
->thoff
, sizeof(_icmph
), &_icmph
);
2001 /* We've been asked to examine this packet, and we
2002 * can't. Hence, no choice but to drop.
2004 duprintf("Dropping evil ICMP tinygram.\n");
2005 par
->hotdrop
= true;
2009 return icmp6_type_code_match(icmpinfo
->type
,
2012 ic
->icmp6_type
, ic
->icmp6_code
,
2013 !!(icmpinfo
->invflags
&IP6T_ICMP_INV
));
2016 /* Called when user tries to insert an entry of this type. */
2017 static int icmp6_checkentry(const struct xt_mtchk_param
*par
)
2019 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2021 /* Must specify no unknown invflags */
2022 return (icmpinfo
->invflags
& ~IP6T_ICMP_INV
) ? -EINVAL
: 0;
/* The built-in targets: standard (NULL) and error. The standard target's
 * payload is just a verdict int (with a compat_int_t conversion pair for
 * 32-bit userland); the error target carries a rule-chain name and runs
 * ip6t_error(). NOTE(review): extraction dropped the array's braces and
 * the standard target's #endif; compare with upstream. */
2025 /* The built-in targets: standard (NULL) and error. */
2026 static struct xt_target ip6t_builtin_tg
[] __read_mostly
= {
2028 .name
= XT_STANDARD_TARGET
,
2029 .targetsize
= sizeof(int),
2030 .family
= NFPROTO_IPV6
,
2031 #ifdef CONFIG_COMPAT
2032 .compatsize
= sizeof(compat_int_t
),
2033 .compat_from_user
= compat_standard_from_user
,
2034 .compat_to_user
= compat_standard_to_user
,
2038 .name
= XT_ERROR_TARGET
,
2039 .target
= ip6t_error
,
2040 .targetsize
= XT_FUNCTION_MAXNAMELEN
,
2041 .family
= NFPROTO_IPV6
,
/* Socket-option registration: binds the IP6T_SO_SET_*/IP6T_SO_GET_* command
 * ranges to the native dispatchers, with compat_{set,get} handlers for
 * 32-bit callers when CONFIG_COMPAT is enabled. */
2045 static struct nf_sockopt_ops ip6t_sockopts
= {
2047 .set_optmin
= IP6T_BASE_CTL
,
2048 .set_optmax
= IP6T_SO_SET_MAX
+1,
2049 .set
= do_ip6t_set_ctl
,
2050 #ifdef CONFIG_COMPAT
2051 .compat_set
= compat_do_ip6t_set_ctl
,
2053 .get_optmin
= IP6T_BASE_CTL
,
2054 .get_optmax
= IP6T_SO_GET_MAX
+1,
2055 .get
= do_ip6t_get_ctl
,
2056 #ifdef CONFIG_COMPAT
2057 .compat_get
= compat_do_ip6t_get_ctl
,
2059 .owner
= THIS_MODULE
,
/* The built-in icmp6 match: pairs icmp6_match() (per-packet) with
 * icmp6_checkentry() (rule-insertion validation), restricted to
 * IPPROTO_ICMPV6 packets in the IPv6 family. */
2062 static struct xt_match ip6t_builtin_mt
[] __read_mostly
= {
2065 .match
= icmp6_match
,
2066 .matchsize
= sizeof(struct ip6t_icmp
),
2067 .checkentry
= icmp6_checkentry
,
2068 .proto
= IPPROTO_ICMPV6
,
2069 .family
= NFPROTO_IPV6
,
/* Per-network-namespace init: set up the IPv6 x_tables state for @net. */
2073 static int __net_init
ip6_tables_net_init(struct net
*net
)
2075 return xt_proto_init(net
, NFPROTO_IPV6
);
/* Per-network-namespace teardown: release the IPv6 x_tables state. */
2078 static void __net_exit
ip6_tables_net_exit(struct net
*net
)
2080 xt_proto_fini(net
, NFPROTO_IPV6
);
/* pernet hooks so each network namespace gets its own table state. */
2083 static struct pernet_operations ip6_tables_net_ops
= {
2084 .init
= ip6_tables_net_init
,
2085 .exit
= ip6_tables_net_exit
,
/* Module init: register pernet ops, the built-in targets and matches, and
 * the sockopt interface — in that order, with reverse-order unwinding on
 * failure (the trailing unregister calls are the error labels' bodies).
 * NOTE(review): extraction dropped the "if (ret < 0) goto ..." lines and
 * the error labels themselves; compare with upstream.
 */
2088 static int __init
ip6_tables_init(void)
2092 ret
= register_pernet_subsys(&ip6_tables_net_ops
);
2096 /* No one else will be downing sem now, so we won't sleep */
2097 ret
= xt_register_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
2100 ret
= xt_register_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
2104 /* Register setsockopt */
2105 ret
= nf_register_sockopt(&ip6t_sockopts
);
2109 pr_info("(C) 2000-2006 Netfilter Core Team\n");
/* Error unwind targets: undo registrations in reverse order. */
2113 xt_unregister_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
2115 xt_unregister_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
2117 unregister_pernet_subsys(&ip6_tables_net_ops
);
/* Module exit: unregister everything ip6_tables_init() set up, in reverse
 * registration order. */
2122 static void __exit
ip6_tables_fini(void)
2124 nf_unregister_sockopt(&ip6t_sockopts
);
2126 xt_unregister_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
2127 xt_unregister_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
2128 unregister_pernet_subsys(&ip6_tables_net_ops
);
/* Public API for table-providing modules (ip6table_filter etc.) and the
 * module entry/exit hookup. */
2131 EXPORT_SYMBOL(ip6t_register_table
);
2132 EXPORT_SYMBOL(ip6t_unregister_table
);
2133 EXPORT_SYMBOL(ip6t_do_table
);
2135 module_init(ip6_tables_init
);
2136 module_exit(ip6_tables_fini
);