netfilter: x_tables: validate e->target_offset early
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (C) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/cache.h>
14 #include <linux/capability.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/icmp.h>
21 #include <net/ip.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28
29 #include <linux/netfilter/x_tables.h>
30 #include <linux/netfilter_ipv4/ip_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
33
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv4 packet filter");
37
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
41
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) pr_info(format , ## args)
44 #else
45 #define dprintf(format, args...)
46 #endif
47
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) pr_info(format , ## args)
50 #else
51 #define duprintf(format, args...)
52 #endif
53
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) WARN_ON(!(x))
56 #else
57 #define IP_NF_ASSERT(x)
58 #endif
59
60 #if 0
61 /* All the better to debug you with... */
62 #define static
63 #define inline
64 #endif
65
66 void *ipt_alloc_initial_table(const struct xt_table *info)
67 {
68 return xt_alloc_initial_table(ipt, IPT);
69 }
70 EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
71
72 /* Returns whether the packet matches the rule or not. */
73 /* Performance critical - called for every packet */
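/*
 * FWINV(cond, flag) below xors a match condition with the corresponding
 * IPT_INV_* bit, so one expression handles both the plain and the
 * "!"-inverted form of each rule field.
 */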
74 static inline bool
75 ip_packet_match(const struct iphdr *ip,
76 const char *indev,
77 const char *outdev,
78 const struct ipt_ip *ipinfo,
79 int isfrag)
80 {
81 unsigned long ret;
82
83 #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
84
85 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
86 IPT_INV_SRCIP) ||
87 FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
88 IPT_INV_DSTIP)) {
89 dprintf("Source or dest mismatch.\n");
90
91 dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
92 &ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
93 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
94 dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
95 &ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
96 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
97 return false;
98 }
99
100 ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
101
102 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
103 dprintf("VIA in mismatch (%s vs %s).%s\n",
104 indev, ipinfo->iniface,
105 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
106 return false;
107 }
108
109 ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);
110
111 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
112 dprintf("VIA out mismatch (%s vs %s).%s\n",
113 outdev, ipinfo->outiface,
114 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
115 return false;
116 }
117
118 /* Check specific protocol */
119 if (ipinfo->proto &&
120 FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
121 dprintf("Packet protocol %hi does not match %hi.%s\n",
122 ip->protocol, ipinfo->proto,
123 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
124 return false;
125 }
126
127 /* If we have a fragment rule but the packet is not a fragment
128 * then we return false */
129 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
130 dprintf("Fragment rule but not fragment.%s\n",
131 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
132 return false;
133 }
134
135 return true;
136 }
137
138 static bool
139 ip_checkentry(const struct ipt_ip *ip)
140 {
141 if (ip->flags & ~IPT_F_MASK) {
142 duprintf("Unknown flag bits set: %08X\n",
143 ip->flags & ~IPT_F_MASK);
144 return false;
145 }
146 if (ip->invflags & ~IPT_INV_MASK) {
147 duprintf("Unknown invflag bits set: %08X\n",
148 ip->invflags & ~IPT_INV_MASK);
149 return false;
150 }
151 return true;
152 }
153
154 static unsigned int
155 ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
156 {
157 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
158
159 return NF_DROP;
160 }
161
162 /* Performance critical */
163 static inline struct ipt_entry *
164 get_entry(const void *base, unsigned int offset)
165 {
166 return (struct ipt_entry *)(base + offset);
167 }
168
169 /* All zeroes == unconditional rule. */
170 /* Mildly perf critical (only if packet tracing is on) */
171 static inline bool unconditional(const struct ipt_ip *ip)
172 {
173 static const struct ipt_ip uncond;
174
175 return memcmp(ip, &uncond, sizeof(uncond)) == 0;
176 #undef FWINV
177 }
178
179 /* for const-correctness */
180 static inline const struct xt_entry_target *
181 ipt_get_target_c(const struct ipt_entry *e)
182 {
183 return ipt_get_target((struct ipt_entry *)e);
184 }
185
186 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
187 static const char *const hooknames[] = {
188 [NF_INET_PRE_ROUTING] = "PREROUTING",
189 [NF_INET_LOCAL_IN] = "INPUT",
190 [NF_INET_FORWARD] = "FORWARD",
191 [NF_INET_LOCAL_OUT] = "OUTPUT",
192 [NF_INET_POST_ROUTING] = "POSTROUTING",
193 };
194
195 enum nf_ip_trace_comments {
196 NF_IP_TRACE_COMMENT_RULE,
197 NF_IP_TRACE_COMMENT_RETURN,
198 NF_IP_TRACE_COMMENT_POLICY,
199 };
200
201 static const char *const comments[] = {
202 [NF_IP_TRACE_COMMENT_RULE] = "rule",
203 [NF_IP_TRACE_COMMENT_RETURN] = "return",
204 [NF_IP_TRACE_COMMENT_POLICY] = "policy",
205 };
206
207 static struct nf_loginfo trace_loginfo = {
208 .type = NF_LOG_TYPE_LOG,
209 .u = {
210 .log = {
211 .level = 4,
212 .logflags = NF_LOG_MASK,
213 },
214 },
215 };
216
217 /* Mildly perf critical (only if packet tracing is on) */
218 static inline int
219 get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
220 const char *hookname, const char **chainname,
221 const char **comment, unsigned int *rulenum)
222 {
223 const struct xt_standard_target *t = (void *)ipt_get_target_c(s);
224
225 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
226 /* Head of user chain: ERROR target with chainname */
227 *chainname = t->target.data;
228 (*rulenum) = 0;
229 } else if (s == e) {
230 (*rulenum)++;
231
232 if (s->target_offset == sizeof(struct ipt_entry) &&
233 strcmp(t->target.u.kernel.target->name,
234 XT_STANDARD_TARGET) == 0 &&
235 t->verdict < 0 &&
236 unconditional(&s->ip)) {
237 /* Tail of chains: STANDARD target (return/policy) */
238 *comment = *chainname == hookname
239 ? comments[NF_IP_TRACE_COMMENT_POLICY]
240 : comments[NF_IP_TRACE_COMMENT_RETURN];
241 }
242 return 1;
243 } else
244 (*rulenum)++;
245
246 return 0;
247 }
248
249 static void trace_packet(const struct sk_buff *skb,
250 unsigned int hook,
251 const struct net_device *in,
252 const struct net_device *out,
253 const char *tablename,
254 const struct xt_table_info *private,
255 const struct ipt_entry *e)
256 {
257 const void *table_base;
258 const struct ipt_entry *root;
259 const char *hookname, *chainname, *comment;
260 const struct ipt_entry *iter;
261 unsigned int rulenum = 0;
262 struct net *net = dev_net(in ? in : out);
263
264 table_base = private->entries[smp_processor_id()];
265 root = get_entry(table_base, private->hook_entry[hook]);
266
267 hookname = chainname = hooknames[hook];
268 comment = comments[NF_IP_TRACE_COMMENT_RULE];
269
270 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
271 if (get_chainname_rulenum(iter, e, hookname,
272 &chainname, &comment, &rulenum) != 0)
273 break;
274
275 nf_log_packet(net, AF_INET, hook, skb, in, out, &trace_loginfo,
276 "TRACE: %s:%s:%s:%u ",
277 tablename, chainname, comment, rulenum);
278 }
279 #endif
280
281 static inline __pure
282 struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
283 {
284 return (void *)entry + entry->next_offset;
285 }
286
287 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
288 unsigned int
289 ipt_do_table(struct sk_buff *skb,
290 unsigned int hook,
291 const struct net_device *in,
292 const struct net_device *out,
293 struct xt_table *table)
294 {
295 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
296 const struct iphdr *ip;
297 /* Initializing verdict to NF_DROP keeps gcc happy. */
298 unsigned int verdict = NF_DROP;
299 const char *indev, *outdev;
300 const void *table_base;
301 struct ipt_entry *e, **jumpstack;
302 unsigned int *stackptr, origptr, cpu;
303 const struct xt_table_info *private;
304 struct xt_action_param acpar;
305 unsigned int addend;
306
307 /* Initialization */
308 ip = ip_hdr(skb);
309 indev = in ? in->name : nulldevname;
310 outdev = out ? out->name : nulldevname;
311 /* We handle fragments by dealing with the first fragment as
312 * if it was a normal packet. All other fragments are treated
313 * normally, except that they will NEVER match rules that ask
314 * things we don't know, i.e. tcp syn flag or ports. If the
315 * rule is also a fragment-specific rule, non-fragments won't
316 * match it. */
317 acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
318 acpar.thoff = ip_hdrlen(skb);
319 acpar.hotdrop = false;
320 acpar.in = in;
321 acpar.out = out;
322 acpar.family = NFPROTO_IPV4;
323 acpar.hooknum = hook;
324
325 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
326 local_bh_disable();
327 addend = xt_write_recseq_begin();
328 private = table->private;
329 cpu = smp_processor_id();
330 table_base = private->entries[cpu];
331 jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
332 stackptr = per_cpu_ptr(private->stackptr, cpu);
333 origptr = *stackptr;
334
335 e = get_entry(table_base, private->hook_entry[hook]);
336
337 pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
338 table->name, hook, origptr,
339 get_entry(table_base, private->underflow[hook]));
340
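/*
 * Main rule-walking loop.  Each entry is checked against the IP header
 * and its extended matches; on a match the counters are bumped and the
 * target is applied.  Standard targets with a negative verdict either end
 * the walk (ACCEPT/DROP/...) or pop the jump stack (RETURN); non-negative
 * verdicts are jumps, which push the current entry before following the
 * offset.  Extension targets may modify the packet, so the IP header is
 * reloaded after they run.
 */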
341 do {
342 const struct xt_entry_target *t;
343 const struct xt_entry_match *ematch;
344
345 IP_NF_ASSERT(e);
346 if (!ip_packet_match(ip, indev, outdev,
347 &e->ip, acpar.fragoff)) {
348 no_match:
349 e = ipt_next_entry(e);
350 continue;
351 }
352
353 xt_ematch_foreach(ematch, e) {
354 acpar.match = ematch->u.kernel.match;
355 acpar.matchinfo = ematch->data;
356 if (!acpar.match->match(skb, &acpar))
357 goto no_match;
358 }
359
360 ADD_COUNTER(e->counters, skb->len, 1);
361
362 t = ipt_get_target(e);
363 IP_NF_ASSERT(t->u.kernel.target);
364
365 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
366 /* The packet is traced: log it */
367 if (unlikely(skb->nf_trace))
368 trace_packet(skb, hook, in, out,
369 table->name, private, e);
370 #endif
371 /* Standard target? */
372 if (!t->u.kernel.target->target) {
373 int v;
374
375 v = ((struct xt_standard_target *)t)->verdict;
376 if (v < 0) {
377 /* Pop from stack? */
378 if (v != XT_RETURN) {
379 verdict = (unsigned int)(-v) - 1;
380 break;
381 }
382 if (*stackptr <= origptr) {
383 e = get_entry(table_base,
384 private->underflow[hook]);
385 pr_debug("Underflow (this is normal) "
386 "to %p\n", e);
387 } else {
388 e = jumpstack[--*stackptr];
389 pr_debug("Pulled %p out from pos %u\n",
390 e, *stackptr);
391 e = ipt_next_entry(e);
392 }
393 continue;
394 }
395 if (table_base + v != ipt_next_entry(e) &&
396 !(e->ip.flags & IPT_F_GOTO)) {
397 if (*stackptr >= private->stacksize) {
398 verdict = NF_DROP;
399 break;
400 }
401 jumpstack[(*stackptr)++] = e;
402 pr_debug("Pushed %p into pos %u\n",
403 e, *stackptr - 1);
404 }
405
406 e = get_entry(table_base, v);
407 continue;
408 }
409
410 acpar.target = t->u.kernel.target;
411 acpar.targinfo = t->data;
412
413 verdict = t->u.kernel.target->target(skb, &acpar);
414 /* Target might have changed stuff. */
415 ip = ip_hdr(skb);
416 if (verdict == XT_CONTINUE)
417 e = ipt_next_entry(e);
418 else
419 /* Verdict */
420 break;
421 } while (!acpar.hotdrop);
422 pr_debug("Exiting %s; resetting sp from %u to %u\n",
423 __func__, *stackptr, origptr);
424 *stackptr = origptr;
425 xt_write_recseq_end(addend);
426 local_bh_enable();
427
428 #ifdef DEBUG_ALLOW_ALL
429 return NF_ACCEPT;
430 #else
431 if (acpar.hotdrop)
432 return NF_DROP;
433 else return verdict;
434 #endif
435 }
436
437 /* Figures out from what hook each rule can be called: returns 0 if
438 there are loops. Puts hook bitmask in comefrom. */
439 static int
440 mark_source_chains(const struct xt_table_info *newinfo,
441 unsigned int valid_hooks, void *entry0)
442 {
443 unsigned int hook;
444
445 /* No recursion; use packet counter to save back ptrs (reset
446 to 0 as we leave), and comefrom to save source hook bitmask */
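/*
 * In short: an iterative depth-first walk over the rule graph.
 * counters.pcnt temporarily holds the position we came from (the back
 * pointer), one comefrom bit per hook marks "already seen from this
 * hook", and the NF_INET_NUMHOOKS bit marks "on the current path";
 * reaching an entry whose path bit is still set means the ruleset
 * loops, so we bail out and return 0.
 */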
447 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
448 unsigned int pos = newinfo->hook_entry[hook];
449 struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);
450
451 if (!(valid_hooks & (1 << hook)))
452 continue;
453
454 /* Set initial back pointer. */
455 e->counters.pcnt = pos;
456
457 for (;;) {
458 const struct xt_standard_target *t
459 = (void *)ipt_get_target_c(e);
460 int visited = e->comefrom & (1 << hook);
461
462 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
463 pr_err("iptables: loop hook %u pos %u %08X.\n",
464 hook, pos, e->comefrom);
465 return 0;
466 }
467 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
468
469 /* Unconditional return/END. */
470 if ((e->target_offset == sizeof(struct ipt_entry) &&
471 (strcmp(t->target.u.user.name,
472 XT_STANDARD_TARGET) == 0) &&
473 t->verdict < 0 && unconditional(&e->ip)) ||
474 visited) {
475 unsigned int oldpos, size;
476
477 if ((strcmp(t->target.u.user.name,
478 XT_STANDARD_TARGET) == 0) &&
479 t->verdict < -NF_MAX_VERDICT - 1) {
480 duprintf("mark_source_chains: bad "
481 "negative verdict (%i)\n",
482 t->verdict);
483 return 0;
484 }
485
486 /* Return: backtrack through the last
487 big jump. */
488 do {
489 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
490 #ifdef DEBUG_IP_FIREWALL_USER
491 if (e->comefrom
492 & (1 << NF_INET_NUMHOOKS)) {
493 duprintf("Back unset "
494 "on hook %u "
495 "rule %u\n",
496 hook, pos);
497 }
498 #endif
499 oldpos = pos;
500 pos = e->counters.pcnt;
501 e->counters.pcnt = 0;
502
503 /* We're at the start. */
504 if (pos == oldpos)
505 goto next;
506
507 e = (struct ipt_entry *)
508 (entry0 + pos);
509 } while (oldpos == pos + e->next_offset);
510
511 /* Move along one */
512 size = e->next_offset;
513 e = (struct ipt_entry *)
514 (entry0 + pos + size);
515 e->counters.pcnt = pos;
516 pos += size;
517 } else {
518 int newpos = t->verdict;
519
520 if (strcmp(t->target.u.user.name,
521 XT_STANDARD_TARGET) == 0 &&
522 newpos >= 0) {
523 if (newpos > newinfo->size -
524 sizeof(struct ipt_entry)) {
525 duprintf("mark_source_chains: "
526 "bad verdict (%i)\n",
527 newpos);
528 return 0;
529 }
530 /* This a jump; chase it. */
531 duprintf("Jump rule %u -> %u\n",
532 pos, newpos);
533 } else {
534 /* ... this is a fallthru */
535 newpos = pos + e->next_offset;
536 }
537 e = (struct ipt_entry *)
538 (entry0 + newpos);
539 e->counters.pcnt = pos;
540 pos = newpos;
541 }
542 }
543 next:
544 duprintf("Finished chain %u\n", hook);
545 }
546 return 1;
547 }
548
549 static void cleanup_match(struct xt_entry_match *m, struct net *net)
550 {
551 struct xt_mtdtor_param par;
552
553 par.net = net;
554 par.match = m->u.kernel.match;
555 par.matchinfo = m->data;
556 par.family = NFPROTO_IPV4;
557 if (par.match->destroy != NULL)
558 par.match->destroy(&par);
559 module_put(par.match->me);
560 }
561
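/*
 * Minimal per-entry sanity checks, done before anything dereferences the
 * target: both target_offset and the target's declared size must keep the
 * target fully inside the entry.  The layout of one entry in the blob is:
 *
 *	struct ipt_entry | match 0 ... match N | target        | next entry
 *	0                  sizeof(ipt_entry)     target_offset   next_offset
 */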
562 static int
563 check_entry(const struct ipt_entry *e)
564 {
565 const struct xt_entry_target *t;
566
567 if (!ip_checkentry(&e->ip))
568 return -EINVAL;
569
570 if (e->target_offset + sizeof(struct xt_entry_target) >
571 e->next_offset)
572 return -EINVAL;
573
574 t = ipt_get_target_c(e);
575 if (e->target_offset + t->u.target_size > e->next_offset)
576 return -EINVAL;
577
578 return 0;
579 }
580
581 static int
582 check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
583 {
584 const struct ipt_ip *ip = par->entryinfo;
585 int ret;
586
587 par->match = m->u.kernel.match;
588 par->matchinfo = m->data;
589
590 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
591 ip->proto, ip->invflags & IPT_INV_PROTO);
592 if (ret < 0) {
593 duprintf("check failed for `%s'.\n", par->match->name);
594 return ret;
595 }
596 return 0;
597 }
598
599 static int
600 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
601 {
602 struct xt_match *match;
603 int ret;
604
605 match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
606 m->u.user.revision);
607 if (IS_ERR(match)) {
608 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
609 return PTR_ERR(match);
610 }
611 m->u.kernel.match = match;
612
613 ret = check_match(m, par);
614 if (ret)
615 goto err;
616
617 return 0;
618 err:
619 module_put(m->u.kernel.match->me);
620 return ret;
621 }
622
623 static int check_target(struct ipt_entry *e, struct net *net, const char *name)
624 {
625 struct xt_entry_target *t = ipt_get_target(e);
626 struct xt_tgchk_param par = {
627 .net = net,
628 .table = name,
629 .entryinfo = e,
630 .target = t->u.kernel.target,
631 .targinfo = t->data,
632 .hook_mask = e->comefrom,
633 .family = NFPROTO_IPV4,
634 };
635 int ret;
636
637 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
638 e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
639 if (ret < 0) {
640 duprintf("check failed for `%s'.\n",
641 t->u.kernel.target->name);
642 return ret;
643 }
644 return 0;
645 }
646
647 static int
648 find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
649 unsigned int size)
650 {
651 struct xt_entry_target *t;
652 struct xt_target *target;
653 int ret;
654 unsigned int j;
655 struct xt_mtchk_param mtpar;
656 struct xt_entry_match *ematch;
657
658 j = 0;
659 mtpar.net = net;
660 mtpar.table = name;
661 mtpar.entryinfo = &e->ip;
662 mtpar.hook_mask = e->comefrom;
663 mtpar.family = NFPROTO_IPV4;
664 xt_ematch_foreach(ematch, e) {
665 ret = find_check_match(ematch, &mtpar);
666 if (ret != 0)
667 goto cleanup_matches;
668 ++j;
669 }
670
671 t = ipt_get_target(e);
672 target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
673 t->u.user.revision);
674 if (IS_ERR(target)) {
675 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
676 ret = PTR_ERR(target);
677 goto cleanup_matches;
678 }
679 t->u.kernel.target = target;
680
681 ret = check_target(e, net, name);
682 if (ret)
683 goto err;
684 return 0;
685 err:
686 module_put(t->u.kernel.target->me);
687 cleanup_matches:
688 xt_ematch_foreach(ematch, e) {
689 if (j-- == 0)
690 break;
691 cleanup_match(ematch, net);
692 }
693 return ret;
694 }
695
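/*
 * An underflow entry (the base chain policy) must be an unconditional rule
 * using the standard target with a plain ACCEPT or DROP verdict, so a packet
 * that falls off the end of a base chain always gets a real verdict.
 */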
696 static bool check_underflow(const struct ipt_entry *e)
697 {
698 const struct xt_entry_target *t;
699 unsigned int verdict;
700
701 if (!unconditional(&e->ip))
702 return false;
703 t = ipt_get_target_c(e);
704 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
705 return false;
706 verdict = ((struct xt_standard_target *)t)->verdict;
707 verdict = -verdict - 1;
708 return verdict == NF_DROP || verdict == NF_ACCEPT;
709 }
710
711 static int
712 check_entry_size_and_hooks(struct ipt_entry *e,
713 struct xt_table_info *newinfo,
714 const unsigned char *base,
715 const unsigned char *limit,
716 const unsigned int *hook_entries,
717 const unsigned int *underflows,
718 unsigned int valid_hooks)
719 {
720 unsigned int h;
721 int err;
722
723 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
724 (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
725 duprintf("Bad offset %p\n", e);
726 return -EINVAL;
727 }
728
729 if (e->next_offset
730 < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
731 duprintf("checking: element %p size %u\n",
732 e, e->next_offset);
733 return -EINVAL;
734 }
735
736 err = check_entry(e);
737 if (err)
738 return err;
739
740 /* Check hooks & underflows */
741 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
742 if (!(valid_hooks & (1 << h)))
743 continue;
744 if ((unsigned char *)e - base == hook_entries[h])
745 newinfo->hook_entry[h] = hook_entries[h];
746 if ((unsigned char *)e - base == underflows[h]) {
747 if (!check_underflow(e)) {
748 pr_err("Underflows must be unconditional and "
749 "use the STANDARD target with "
750 "ACCEPT/DROP\n");
751 return -EINVAL;
752 }
753 newinfo->underflow[h] = underflows[h];
754 }
755 }
756
757 /* Clear counters and comefrom */
758 e->counters = ((struct xt_counters) { 0, 0 });
759 e->comefrom = 0;
760 return 0;
761 }
762
763 static void
764 cleanup_entry(struct ipt_entry *e, struct net *net)
765 {
766 struct xt_tgdtor_param par;
767 struct xt_entry_target *t;
768 struct xt_entry_match *ematch;
769
770 /* Cleanup all matches */
771 xt_ematch_foreach(ematch, e)
772 cleanup_match(ematch, net);
773 t = ipt_get_target(e);
774
775 par.net = net;
776 par.target = t->u.kernel.target;
777 par.targinfo = t->data;
778 par.family = NFPROTO_IPV4;
779 if (par.target->destroy != NULL)
780 par.target->destroy(&par);
781 module_put(par.target->me);
782 }
783
784 /* Checks and translates the user-supplied table segment (held in
785 newinfo) */
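/*
 * This runs in several passes: every entry first has its size, alignment,
 * hook entry points and underflows checked, then mark_source_chains()
 * rejects rulesets containing loops, then each match and target is looked
 * up and its checkentry hook run, and finally the validated blob is copied
 * to the other CPUs' entry areas.
 */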
786 static int
787 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
788 const struct ipt_replace *repl)
789 {
790 struct ipt_entry *iter;
791 unsigned int i;
792 int ret = 0;
793
794 newinfo->size = repl->size;
795 newinfo->number = repl->num_entries;
796
797 /* Init all hooks to impossible value. */
798 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
799 newinfo->hook_entry[i] = 0xFFFFFFFF;
800 newinfo->underflow[i] = 0xFFFFFFFF;
801 }
802
803 duprintf("translate_table: size %u\n", newinfo->size);
804 i = 0;
805 /* Walk through entries, checking offsets. */
806 xt_entry_foreach(iter, entry0, newinfo->size) {
807 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
808 entry0 + repl->size,
809 repl->hook_entry,
810 repl->underflow,
811 repl->valid_hooks);
812 if (ret != 0)
813 return ret;
814 ++i;
815 if (strcmp(ipt_get_target(iter)->u.user.name,
816 XT_ERROR_TARGET) == 0)
817 ++newinfo->stacksize;
818 }
819
820 if (i != repl->num_entries) {
821 duprintf("translate_table: %u not %u entries\n",
822 i, repl->num_entries);
823 return -EINVAL;
824 }
825
826 /* Check hooks all assigned */
827 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
828 /* Only hooks which are valid */
829 if (!(repl->valid_hooks & (1 << i)))
830 continue;
831 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
832 duprintf("Invalid hook entry %u %u\n",
833 i, repl->hook_entry[i]);
834 return -EINVAL;
835 }
836 if (newinfo->underflow[i] == 0xFFFFFFFF) {
837 duprintf("Invalid underflow %u %u\n",
838 i, repl->underflow[i]);
839 return -EINVAL;
840 }
841 }
842
843 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
844 return -ELOOP;
845
846 /* Finally, each sanity check must pass */
847 i = 0;
848 xt_entry_foreach(iter, entry0, newinfo->size) {
849 ret = find_check_entry(iter, net, repl->name, repl->size);
850 if (ret != 0)
851 break;
852 ++i;
853 }
854
855 if (ret != 0) {
856 xt_entry_foreach(iter, entry0, newinfo->size) {
857 if (i-- == 0)
858 break;
859 cleanup_entry(iter, net);
860 }
861 return ret;
862 }
863
864 /* And one copy for every other CPU */
865 for_each_possible_cpu(i) {
866 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
867 memcpy(newinfo->entries[i], entry0, newinfo->size);
868 }
869
870 return ret;
871 }
872
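/*
 * Fold the per-CPU packet/byte counters into one array for user space.
 * The xt_recseq sequence counter is used to re-read a counter pair if a
 * writer on that CPU updated it while it was being sampled.
 */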
873 static void
874 get_counters(const struct xt_table_info *t,
875 struct xt_counters counters[])
876 {
877 struct ipt_entry *iter;
878 unsigned int cpu;
879 unsigned int i;
880
881 for_each_possible_cpu(cpu) {
882 seqcount_t *s = &per_cpu(xt_recseq, cpu);
883
884 i = 0;
885 xt_entry_foreach(iter, t->entries[cpu], t->size) {
886 u64 bcnt, pcnt;
887 unsigned int start;
888
889 do {
890 start = read_seqcount_begin(s);
891 bcnt = iter->counters.bcnt;
892 pcnt = iter->counters.pcnt;
893 } while (read_seqcount_retry(s, start));
894
895 ADD_COUNTER(counters[i], bcnt, pcnt);
896 ++i; /* macro does multi eval of i */
897 }
898 }
899 }
900
901 static struct xt_counters *alloc_counters(const struct xt_table *table)
902 {
903 unsigned int countersize;
904 struct xt_counters *counters;
905 const struct xt_table_info *private = table->private;
906
907 /* We need atomic snapshot of counters: rest doesn't change
908 (other than comefrom, which userspace doesn't care
909 about). */
910 countersize = sizeof(struct xt_counters) * private->number;
911 counters = vzalloc(countersize);
912
913 if (counters == NULL)
914 return ERR_PTR(-ENOMEM);
915
916 get_counters(private, counters);
917
918 return counters;
919 }
920
921 static int
922 copy_entries_to_user(unsigned int total_size,
923 const struct xt_table *table,
924 void __user *userptr)
925 {
926 unsigned int off, num;
927 const struct ipt_entry *e;
928 struct xt_counters *counters;
929 const struct xt_table_info *private = table->private;
930 int ret = 0;
931 const void *loc_cpu_entry;
932
933 counters = alloc_counters(table);
934 if (IS_ERR(counters))
935 return PTR_ERR(counters);
936
937 /* choose the copy that is on our node/cpu, ...
938 * This choice is lazy (because current thread is
939 * allowed to migrate to another cpu)
940 */
941 loc_cpu_entry = private->entries[raw_smp_processor_id()];
942 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
943 ret = -EFAULT;
944 goto free_counters;
945 }
946
947 /* FIXME: use iterator macros --RR */
948 /* ... then go back and fix counters and names */
949 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
950 unsigned int i;
951 const struct xt_entry_match *m;
952 const struct xt_entry_target *t;
953
954 e = (struct ipt_entry *)(loc_cpu_entry + off);
955 if (copy_to_user(userptr + off
956 + offsetof(struct ipt_entry, counters),
957 &counters[num],
958 sizeof(counters[num])) != 0) {
959 ret = -EFAULT;
960 goto free_counters;
961 }
962
963 for (i = sizeof(struct ipt_entry);
964 i < e->target_offset;
965 i += m->u.match_size) {
966 m = (void *)e + i;
967
968 if (copy_to_user(userptr + off + i
969 + offsetof(struct xt_entry_match,
970 u.user.name),
971 m->u.kernel.match->name,
972 strlen(m->u.kernel.match->name)+1)
973 != 0) {
974 ret = -EFAULT;
975 goto free_counters;
976 }
977 }
978
979 t = ipt_get_target_c(e);
980 if (copy_to_user(userptr + off + e->target_offset
981 + offsetof(struct xt_entry_target,
982 u.user.name),
983 t->u.kernel.target->name,
984 strlen(t->u.kernel.target->name)+1) != 0) {
985 ret = -EFAULT;
986 goto free_counters;
987 }
988 }
989
990 free_counters:
991 vfree(counters);
992 return ret;
993 }
994
995 #ifdef CONFIG_COMPAT
996 static void compat_standard_from_user(void *dst, const void *src)
997 {
998 int v = *(compat_int_t *)src;
999
1000 if (v > 0)
1001 v += xt_compat_calc_jump(AF_INET, v);
1002 memcpy(dst, &v, sizeof(v));
1003 }
1004
1005 static int compat_standard_to_user(void __user *dst, const void *src)
1006 {
1007 compat_int_t cv = *(int *)src;
1008
1009 if (cv > 0)
1010 cv -= xt_compat_calc_jump(AF_INET, cv);
1011 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1012 }
1013
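/*
 * Compute how much smaller this entry is in its compat (32-bit) layout by
 * summing the per-match and per-target deltas, record that delta for later
 * offset translation, and shift any hook entry points and underflows that
 * lie after this entry accordingly.
 */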
1014 static int compat_calc_entry(const struct ipt_entry *e,
1015 const struct xt_table_info *info,
1016 const void *base, struct xt_table_info *newinfo)
1017 {
1018 const struct xt_entry_match *ematch;
1019 const struct xt_entry_target *t;
1020 unsigned int entry_offset;
1021 int off, i, ret;
1022
1023 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1024 entry_offset = (void *)e - base;
1025 xt_ematch_foreach(ematch, e)
1026 off += xt_compat_match_offset(ematch->u.kernel.match);
1027 t = ipt_get_target_c(e);
1028 off += xt_compat_target_offset(t->u.kernel.target);
1029 newinfo->size -= off;
1030 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
1031 if (ret)
1032 return ret;
1033
1034 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1035 if (info->hook_entry[i] &&
1036 (e < (struct ipt_entry *)(base + info->hook_entry[i])))
1037 newinfo->hook_entry[i] -= off;
1038 if (info->underflow[i] &&
1039 (e < (struct ipt_entry *)(base + info->underflow[i])))
1040 newinfo->underflow[i] -= off;
1041 }
1042 return 0;
1043 }
1044
1045 static int compat_table_info(const struct xt_table_info *info,
1046 struct xt_table_info *newinfo)
1047 {
1048 struct ipt_entry *iter;
1049 void *loc_cpu_entry;
1050 int ret;
1051
1052 if (!newinfo || !info)
1053 return -EINVAL;
1054
1055 /* we don't care about newinfo->entries[] */
1056 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1057 newinfo->initial_entries = 0;
1058 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1059 xt_compat_init_offsets(AF_INET, info->number);
1060 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1061 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1062 if (ret != 0)
1063 return ret;
1064 }
1065 return 0;
1066 }
1067 #endif
1068
1069 static int get_info(struct net *net, void __user *user,
1070 const int *len, int compat)
1071 {
1072 char name[XT_TABLE_MAXNAMELEN];
1073 struct xt_table *t;
1074 int ret;
1075
1076 if (*len != sizeof(struct ipt_getinfo)) {
1077 duprintf("length %u != %zu\n", *len,
1078 sizeof(struct ipt_getinfo));
1079 return -EINVAL;
1080 }
1081
1082 if (copy_from_user(name, user, sizeof(name)) != 0)
1083 return -EFAULT;
1084
1085 name[XT_TABLE_MAXNAMELEN-1] = '\0';
1086 #ifdef CONFIG_COMPAT
1087 if (compat)
1088 xt_compat_lock(AF_INET);
1089 #endif
1090 t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
1091 "iptable_%s", name);
1092 if (!IS_ERR_OR_NULL(t)) {
1093 struct ipt_getinfo info;
1094 const struct xt_table_info *private = t->private;
1095 #ifdef CONFIG_COMPAT
1096 struct xt_table_info tmp;
1097
1098 if (compat) {
1099 ret = compat_table_info(private, &tmp);
1100 xt_compat_flush_offsets(AF_INET);
1101 private = &tmp;
1102 }
1103 #endif
1104 memset(&info, 0, sizeof(info));
1105 info.valid_hooks = t->valid_hooks;
1106 memcpy(info.hook_entry, private->hook_entry,
1107 sizeof(info.hook_entry));
1108 memcpy(info.underflow, private->underflow,
1109 sizeof(info.underflow));
1110 info.num_entries = private->number;
1111 info.size = private->size;
1112 strcpy(info.name, name);
1113
1114 if (copy_to_user(user, &info, *len) != 0)
1115 ret = -EFAULT;
1116 else
1117 ret = 0;
1118
1119 xt_table_unlock(t);
1120 module_put(t->me);
1121 } else
1122 ret = t ? PTR_ERR(t) : -ENOENT;
1123 #ifdef CONFIG_COMPAT
1124 if (compat)
1125 xt_compat_unlock(AF_INET);
1126 #endif
1127 return ret;
1128 }
1129
1130 static int
1131 get_entries(struct net *net, struct ipt_get_entries __user *uptr,
1132 const int *len)
1133 {
1134 int ret;
1135 struct ipt_get_entries get;
1136 struct xt_table *t;
1137
1138 if (*len < sizeof(get)) {
1139 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1140 return -EINVAL;
1141 }
1142 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1143 return -EFAULT;
1144 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1145 duprintf("get_entries: %u != %zu\n",
1146 *len, sizeof(get) + get.size);
1147 return -EINVAL;
1148 }
1149
1150 t = xt_find_table_lock(net, AF_INET, get.name);
1151 if (!IS_ERR_OR_NULL(t)) {
1152 const struct xt_table_info *private = t->private;
1153 duprintf("t->private->number = %u\n", private->number);
1154 if (get.size == private->size)
1155 ret = copy_entries_to_user(private->size,
1156 t, uptr->entrytable);
1157 else {
1158 duprintf("get_entries: I've got %u not %u!\n",
1159 private->size, get.size);
1160 ret = -EAGAIN;
1161 }
1162 module_put(t->me);
1163 xt_table_unlock(t);
1164 } else
1165 ret = t ? PTR_ERR(t) : -ENOENT;
1166
1167 return ret;
1168 }
1169
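/*
 * Common tail of a table replacement, shared by the native and compat
 * paths: swap in the already-validated new table, read the final counter
 * values out of the old one, destroy the old entries and copy the counters
 * back to user space.
 */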
1170 static int
1171 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1172 struct xt_table_info *newinfo, unsigned int num_counters,
1173 void __user *counters_ptr)
1174 {
1175 int ret;
1176 struct xt_table *t;
1177 struct xt_table_info *oldinfo;
1178 struct xt_counters *counters;
1179 void *loc_cpu_old_entry;
1180 struct ipt_entry *iter;
1181
1182 ret = 0;
1183 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1184 if (!counters) {
1185 ret = -ENOMEM;
1186 goto out;
1187 }
1188
1189 t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
1190 "iptable_%s", name);
1191 if (IS_ERR_OR_NULL(t)) {
1192 ret = t ? PTR_ERR(t) : -ENOENT;
1193 goto free_newinfo_counters_untrans;
1194 }
1195
1196 /* You lied! */
1197 if (valid_hooks != t->valid_hooks) {
1198 duprintf("Valid hook crap: %08X vs %08X\n",
1199 valid_hooks, t->valid_hooks);
1200 ret = -EINVAL;
1201 goto put_module;
1202 }
1203
1204 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1205 if (!oldinfo)
1206 goto put_module;
1207
1208 /* Update module usage count based on number of rules */
1209 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1210 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1211 if ((oldinfo->number > oldinfo->initial_entries) ||
1212 (newinfo->number <= oldinfo->initial_entries))
1213 module_put(t->me);
1214 if ((oldinfo->number > oldinfo->initial_entries) &&
1215 (newinfo->number <= oldinfo->initial_entries))
1216 module_put(t->me);
1217
1218 /* Get the old counters, and synchronize with replace */
1219 get_counters(oldinfo, counters);
1220
1221 /* Decrease module usage counts and free resource */
1222 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1223 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1224 cleanup_entry(iter, net);
1225
1226 xt_free_table_info(oldinfo);
1227 if (copy_to_user(counters_ptr, counters,
1228 sizeof(struct xt_counters) * num_counters) != 0) {
1229 /* Silent error, can't fail, new table is already in place */
1230 net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
1231 }
1232 vfree(counters);
1233 xt_table_unlock(t);
1234 return ret;
1235
1236 put_module:
1237 module_put(t->me);
1238 xt_table_unlock(t);
1239 free_newinfo_counters_untrans:
1240 vfree(counters);
1241 out:
1242 return ret;
1243 }
1244
1245 static int
1246 do_replace(struct net *net, const void __user *user, unsigned int len)
1247 {
1248 int ret;
1249 struct ipt_replace tmp;
1250 struct xt_table_info *newinfo;
1251 void *loc_cpu_entry;
1252 struct ipt_entry *iter;
1253
1254 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1255 return -EFAULT;
1256
1257 /* overflow check */
1258 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1259 return -ENOMEM;
1260 tmp.name[sizeof(tmp.name)-1] = 0;
1261
1262 newinfo = xt_alloc_table_info(tmp.size);
1263 if (!newinfo)
1264 return -ENOMEM;
1265
1266 /* choose the copy that is on our node/cpu */
1267 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1268 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1269 tmp.size) != 0) {
1270 ret = -EFAULT;
1271 goto free_newinfo;
1272 }
1273
1274 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1275 if (ret != 0)
1276 goto free_newinfo;
1277
1278 duprintf("Translated table\n");
1279
1280 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1281 tmp.num_counters, tmp.counters);
1282 if (ret)
1283 goto free_newinfo_untrans;
1284 return 0;
1285
1286 free_newinfo_untrans:
1287 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1288 cleanup_entry(iter, net);
1289 free_newinfo:
1290 xt_free_table_info(newinfo);
1291 return ret;
1292 }
1293
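/*
 * IPT_SO_SET_ADD_COUNTERS: add user-supplied byte/packet counts to the
 * live table.  Only this CPU's copy of the entries is touched, inside an
 * xt_write_recseq section so concurrent readers in get_counters() still
 * see consistent 64-bit values.
 */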
1294 static int
1295 do_add_counters(struct net *net, const void __user *user,
1296 unsigned int len, int compat)
1297 {
1298 unsigned int i, curcpu;
1299 struct xt_counters_info tmp;
1300 struct xt_counters *paddc;
1301 unsigned int num_counters;
1302 const char *name;
1303 int size;
1304 void *ptmp;
1305 struct xt_table *t;
1306 const struct xt_table_info *private;
1307 int ret = 0;
1308 void *loc_cpu_entry;
1309 struct ipt_entry *iter;
1310 unsigned int addend;
1311 #ifdef CONFIG_COMPAT
1312 struct compat_xt_counters_info compat_tmp;
1313
1314 if (compat) {
1315 ptmp = &compat_tmp;
1316 size = sizeof(struct compat_xt_counters_info);
1317 } else
1318 #endif
1319 {
1320 ptmp = &tmp;
1321 size = sizeof(struct xt_counters_info);
1322 }
1323
1324 if (copy_from_user(ptmp, user, size) != 0)
1325 return -EFAULT;
1326
1327 #ifdef CONFIG_COMPAT
1328 if (compat) {
1329 num_counters = compat_tmp.num_counters;
1330 name = compat_tmp.name;
1331 } else
1332 #endif
1333 {
1334 num_counters = tmp.num_counters;
1335 name = tmp.name;
1336 }
1337
1338 if (len != size + num_counters * sizeof(struct xt_counters))
1339 return -EINVAL;
1340
1341 paddc = vmalloc(len - size);
1342 if (!paddc)
1343 return -ENOMEM;
1344
1345 if (copy_from_user(paddc, user + size, len - size) != 0) {
1346 ret = -EFAULT;
1347 goto free;
1348 }
1349
1350 t = xt_find_table_lock(net, AF_INET, name);
1351 if (IS_ERR_OR_NULL(t)) {
1352 ret = t ? PTR_ERR(t) : -ENOENT;
1353 goto free;
1354 }
1355
1356 local_bh_disable();
1357 private = t->private;
1358 if (private->number != num_counters) {
1359 ret = -EINVAL;
1360 goto unlock_up_free;
1361 }
1362
1363 i = 0;
1364 /* Choose the copy that is on our node */
1365 curcpu = smp_processor_id();
1366 loc_cpu_entry = private->entries[curcpu];
1367 addend = xt_write_recseq_begin();
1368 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1369 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1370 ++i;
1371 }
1372 xt_write_recseq_end(addend);
1373 unlock_up_free:
1374 local_bh_enable();
1375 xt_table_unlock(t);
1376 module_put(t->me);
1377 free:
1378 vfree(paddc);
1379
1380 return ret;
1381 }
1382
1383 #ifdef CONFIG_COMPAT
1384 struct compat_ipt_replace {
1385 char name[XT_TABLE_MAXNAMELEN];
1386 u32 valid_hooks;
1387 u32 num_entries;
1388 u32 size;
1389 u32 hook_entry[NF_INET_NUMHOOKS];
1390 u32 underflow[NF_INET_NUMHOOKS];
1391 u32 num_counters;
1392 compat_uptr_t counters; /* struct xt_counters * */
1393 struct compat_ipt_entry entries[0];
1394 };
1395
1396 static int
1397 compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1398 unsigned int *size, struct xt_counters *counters,
1399 unsigned int i)
1400 {
1401 struct xt_entry_target *t;
1402 struct compat_ipt_entry __user *ce;
1403 u_int16_t target_offset, next_offset;
1404 compat_uint_t origsize;
1405 const struct xt_entry_match *ematch;
1406 int ret = 0;
1407
1408 origsize = *size;
1409 ce = (struct compat_ipt_entry __user *)*dstptr;
1410 if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
1411 copy_to_user(&ce->counters, &counters[i],
1412 sizeof(counters[i])) != 0)
1413 return -EFAULT;
1414
1415 *dstptr += sizeof(struct compat_ipt_entry);
1416 *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1417
1418 xt_ematch_foreach(ematch, e) {
1419 ret = xt_compat_match_to_user(ematch, dstptr, size);
1420 if (ret != 0)
1421 return ret;
1422 }
1423 target_offset = e->target_offset - (origsize - *size);
1424 t = ipt_get_target(e);
1425 ret = xt_compat_target_to_user(t, dstptr, size);
1426 if (ret)
1427 return ret;
1428 next_offset = e->next_offset - (origsize - *size);
1429 if (put_user(target_offset, &ce->target_offset) != 0 ||
1430 put_user(next_offset, &ce->next_offset) != 0)
1431 return -EFAULT;
1432 return 0;
1433 }
1434
1435 static int
1436 compat_find_calc_match(struct xt_entry_match *m,
1437 const char *name,
1438 const struct ipt_ip *ip,
1439 unsigned int hookmask,
1440 int *size)
1441 {
1442 struct xt_match *match;
1443
1444 match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
1445 m->u.user.revision);
1446 if (IS_ERR(match)) {
1447 duprintf("compat_check_calc_match: `%s' not found\n",
1448 m->u.user.name);
1449 return PTR_ERR(match);
1450 }
1451 m->u.kernel.match = match;
1452 *size += xt_compat_match_offset(match);
1453 return 0;
1454 }
1455
1456 static void compat_release_entry(struct compat_ipt_entry *e)
1457 {
1458 struct xt_entry_target *t;
1459 struct xt_entry_match *ematch;
1460
1461 /* Cleanup all matches */
1462 xt_ematch_foreach(ematch, e)
1463 module_put(ematch->u.kernel.match->me);
1464 t = compat_ipt_get_target(e);
1465 module_put(t->u.kernel.target->me);
1466 }
1467
1468 static int
1469 check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1470 struct xt_table_info *newinfo,
1471 unsigned int *size,
1472 const unsigned char *base,
1473 const unsigned char *limit,
1474 const unsigned int *hook_entries,
1475 const unsigned int *underflows,
1476 const char *name)
1477 {
1478 struct xt_entry_match *ematch;
1479 struct xt_entry_target *t;
1480 struct xt_target *target;
1481 unsigned int entry_offset;
1482 unsigned int j;
1483 int ret, off, h;
1484
1485 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1486 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
1487 (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1488 duprintf("Bad offset %p, limit = %p\n", e, limit);
1489 return -EINVAL;
1490 }
1491
1492 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1493 sizeof(struct compat_xt_entry_target)) {
1494 duprintf("checking: element %p size %u\n",
1495 e, e->next_offset);
1496 return -EINVAL;
1497 }
1498
1499 /* For purposes of check_entry casting the compat entry is fine */
1500 ret = check_entry((struct ipt_entry *)e);
1501 if (ret)
1502 return ret;
1503
1504 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1505 entry_offset = (void *)e - (void *)base;
1506 j = 0;
1507 xt_ematch_foreach(ematch, e) {
1508 ret = compat_find_calc_match(ematch, name,
1509 &e->ip, e->comefrom, &off);
1510 if (ret != 0)
1511 goto release_matches;
1512 ++j;
1513 }
1514
1515 t = compat_ipt_get_target(e);
1516 target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
1517 t->u.user.revision);
1518 if (IS_ERR(target)) {
1519 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1520 t->u.user.name);
1521 ret = PTR_ERR(target);
1522 goto release_matches;
1523 }
1524 t->u.kernel.target = target;
1525
1526 off += xt_compat_target_offset(target);
1527 *size += off;
1528 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
1529 if (ret)
1530 goto out;
1531
1532 /* Check hooks & underflows */
1533 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1534 if ((unsigned char *)e - base == hook_entries[h])
1535 newinfo->hook_entry[h] = hook_entries[h];
1536 if ((unsigned char *)e - base == underflows[h])
1537 newinfo->underflow[h] = underflows[h];
1538 }
1539
1540 /* Clear counters and comefrom */
1541 memset(&e->counters, 0, sizeof(e->counters));
1542 e->comefrom = 0;
1543 return 0;
1544
1545 out:
1546 module_put(t->u.kernel.target->me);
1547 release_matches:
1548 xt_ematch_foreach(ematch, e) {
1549 if (j-- == 0)
1550 break;
1551 module_put(ematch->u.kernel.match->me);
1552 }
1553 return ret;
1554 }
1555
1556 static int
1557 compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1558 unsigned int *size, const char *name,
1559 struct xt_table_info *newinfo, unsigned char *base)
1560 {
1561 struct xt_entry_target *t;
1562 struct xt_target *target;
1563 struct ipt_entry *de;
1564 unsigned int origsize;
1565 int ret, h;
1566 struct xt_entry_match *ematch;
1567
1568 ret = 0;
1569 origsize = *size;
1570 de = (struct ipt_entry *)*dstptr;
1571 memcpy(de, e, sizeof(struct ipt_entry));
1572 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1573
1574 *dstptr += sizeof(struct ipt_entry);
1575 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1576
1577 xt_ematch_foreach(ematch, e) {
1578 ret = xt_compat_match_from_user(ematch, dstptr, size);
1579 if (ret != 0)
1580 return ret;
1581 }
1582 de->target_offset = e->target_offset - (origsize - *size);
1583 t = compat_ipt_get_target(e);
1584 target = t->u.kernel.target;
1585 xt_compat_target_from_user(t, dstptr, size);
1586
1587 de->next_offset = e->next_offset - (origsize - *size);
1588 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1589 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1590 newinfo->hook_entry[h] -= origsize - *size;
1591 if ((unsigned char *)de - base < newinfo->underflow[h])
1592 newinfo->underflow[h] -= origsize - *size;
1593 }
1594 return ret;
1595 }
1596
1597 static int
1598 compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
1599 {
1600 struct xt_entry_match *ematch;
1601 struct xt_mtchk_param mtpar;
1602 unsigned int j;
1603 int ret = 0;
1604
1605 j = 0;
1606 mtpar.net = net;
1607 mtpar.table = name;
1608 mtpar.entryinfo = &e->ip;
1609 mtpar.hook_mask = e->comefrom;
1610 mtpar.family = NFPROTO_IPV4;
1611 xt_ematch_foreach(ematch, e) {
1612 ret = check_match(ematch, &mtpar);
1613 if (ret != 0)
1614 goto cleanup_matches;
1615 ++j;
1616 }
1617
1618 ret = check_target(e, net, name);
1619 if (ret)
1620 goto cleanup_matches;
1621 return 0;
1622
1623 cleanup_matches:
1624 xt_ematch_foreach(ematch, e) {
1625 if (j-- == 0)
1626 break;
1627 cleanup_match(ematch, net);
1628 }
1629 return ret;
1630 }
1631
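/*
 * Compat counterpart of translate_table(): entries arriving from 32-bit
 * user space are first size-checked in their compat form, then expanded
 * into a freshly allocated table using the native layout, after which the
 * usual loop detection and per-entry checkentry passes run on the native
 * representation.
 */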
1632 static int
1633 translate_compat_table(struct net *net,
1634 const char *name,
1635 unsigned int valid_hooks,
1636 struct xt_table_info **pinfo,
1637 void **pentry0,
1638 unsigned int total_size,
1639 unsigned int number,
1640 unsigned int *hook_entries,
1641 unsigned int *underflows)
1642 {
1643 unsigned int i, j;
1644 struct xt_table_info *newinfo, *info;
1645 void *pos, *entry0, *entry1;
1646 struct compat_ipt_entry *iter0;
1647 struct ipt_entry *iter1;
1648 unsigned int size;
1649 int ret;
1650
1651 info = *pinfo;
1652 entry0 = *pentry0;
1653 size = total_size;
1654 info->number = number;
1655
1656 /* Init all hooks to impossible value. */
1657 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1658 info->hook_entry[i] = 0xFFFFFFFF;
1659 info->underflow[i] = 0xFFFFFFFF;
1660 }
1661
1662 duprintf("translate_compat_table: size %u\n", info->size);
1663 j = 0;
1664 xt_compat_lock(AF_INET);
1665 xt_compat_init_offsets(AF_INET, number);
1666 /* Walk through entries, checking offsets. */
1667 xt_entry_foreach(iter0, entry0, total_size) {
1668 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1669 entry0,
1670 entry0 + total_size,
1671 hook_entries,
1672 underflows,
1673 name);
1674 if (ret != 0)
1675 goto out_unlock;
1676 ++j;
1677 }
1678
1679 ret = -EINVAL;
1680 if (j != number) {
1681 duprintf("translate_compat_table: %u not %u entries\n",
1682 j, number);
1683 goto out_unlock;
1684 }
1685
1686 /* Check hooks all assigned */
1687 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1688 /* Only hooks which are valid */
1689 if (!(valid_hooks & (1 << i)))
1690 continue;
1691 if (info->hook_entry[i] == 0xFFFFFFFF) {
1692 duprintf("Invalid hook entry %u %u\n",
1693 i, hook_entries[i]);
1694 goto out_unlock;
1695 }
1696 if (info->underflow[i] == 0xFFFFFFFF) {
1697 duprintf("Invalid underflow %u %u\n",
1698 i, underflows[i]);
1699 goto out_unlock;
1700 }
1701 }
1702
1703 ret = -ENOMEM;
1704 newinfo = xt_alloc_table_info(size);
1705 if (!newinfo)
1706 goto out_unlock;
1707
1708 newinfo->number = number;
1709 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1710 newinfo->hook_entry[i] = info->hook_entry[i];
1711 newinfo->underflow[i] = info->underflow[i];
1712 }
1713 entry1 = newinfo->entries[raw_smp_processor_id()];
1714 pos = entry1;
1715 size = total_size;
1716 xt_entry_foreach(iter0, entry0, total_size) {
1717 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1718 name, newinfo, entry1);
1719 if (ret != 0)
1720 break;
1721 }
1722 xt_compat_flush_offsets(AF_INET);
1723 xt_compat_unlock(AF_INET);
1724 if (ret)
1725 goto free_newinfo;
1726
1727 ret = -ELOOP;
1728 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1729 goto free_newinfo;
1730
1731 i = 0;
1732 xt_entry_foreach(iter1, entry1, newinfo->size) {
1733 ret = compat_check_entry(iter1, net, name);
1734 if (ret != 0)
1735 break;
1736 ++i;
1737 if (strcmp(ipt_get_target(iter1)->u.user.name,
1738 XT_ERROR_TARGET) == 0)
1739 ++newinfo->stacksize;
1740 }
1741 if (ret) {
1742 /*
1743 * The first i matches need cleanup_entry (calls ->destroy)
1744 * because they had called ->check already. The other j-i
1745 * entries need only release.
1746 */
1747 int skip = i;
1748 j -= i;
1749 xt_entry_foreach(iter0, entry0, newinfo->size) {
1750 if (skip-- > 0)
1751 continue;
1752 if (j-- == 0)
1753 break;
1754 compat_release_entry(iter0);
1755 }
1756 xt_entry_foreach(iter1, entry1, newinfo->size) {
1757 if (i-- == 0)
1758 break;
1759 cleanup_entry(iter1, net);
1760 }
1761 xt_free_table_info(newinfo);
1762 return ret;
1763 }
1764
1765 /* And one copy for every other CPU */
1766 for_each_possible_cpu(i)
1767 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1768 memcpy(newinfo->entries[i], entry1, newinfo->size);
1769
1770 *pinfo = newinfo;
1771 *pentry0 = entry1;
1772 xt_free_table_info(info);
1773 return 0;
1774
1775 free_newinfo:
1776 xt_free_table_info(newinfo);
1777 out:
1778 xt_entry_foreach(iter0, entry0, total_size) {
1779 if (j-- == 0)
1780 break;
1781 compat_release_entry(iter0);
1782 }
1783 return ret;
1784 out_unlock:
1785 xt_compat_flush_offsets(AF_INET);
1786 xt_compat_unlock(AF_INET);
1787 goto out;
1788 }
1789
1790 static int
1791 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1792 {
1793 int ret;
1794 struct compat_ipt_replace tmp;
1795 struct xt_table_info *newinfo;
1796 void *loc_cpu_entry;
1797 struct ipt_entry *iter;
1798
1799 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1800 return -EFAULT;
1801
1802 /* overflow check */
1803 if (tmp.size >= INT_MAX / num_possible_cpus())
1804 return -ENOMEM;
1805 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1806 return -ENOMEM;
1807 tmp.name[sizeof(tmp.name)-1] = 0;
1808
1809 newinfo = xt_alloc_table_info(tmp.size);
1810 if (!newinfo)
1811 return -ENOMEM;
1812
1813 /* choose the copy that is on our node/cpu */
1814 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1815 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1816 tmp.size) != 0) {
1817 ret = -EFAULT;
1818 goto free_newinfo;
1819 }
1820
1821 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1822 &newinfo, &loc_cpu_entry, tmp.size,
1823 tmp.num_entries, tmp.hook_entry,
1824 tmp.underflow);
1825 if (ret != 0)
1826 goto free_newinfo;
1827
1828 duprintf("compat_do_replace: Translated table\n");
1829
1830 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1831 tmp.num_counters, compat_ptr(tmp.counters));
1832 if (ret)
1833 goto free_newinfo_untrans;
1834 return 0;
1835
1836 free_newinfo_untrans:
1837 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1838 cleanup_entry(iter, net);
1839 free_newinfo:
1840 xt_free_table_info(newinfo);
1841 return ret;
1842 }
1843
1844 static int
1845 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1846 unsigned int len)
1847 {
1848 int ret;
1849
1850 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1851 return -EPERM;
1852
1853 switch (cmd) {
1854 case IPT_SO_SET_REPLACE:
1855 ret = compat_do_replace(sock_net(sk), user, len);
1856 break;
1857
1858 case IPT_SO_SET_ADD_COUNTERS:
1859 ret = do_add_counters(sock_net(sk), user, len, 1);
1860 break;
1861
1862 default:
1863 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1864 ret = -EINVAL;
1865 }
1866
1867 return ret;
1868 }
1869
1870 struct compat_ipt_get_entries {
1871 char name[XT_TABLE_MAXNAMELEN];
1872 compat_uint_t size;
1873 struct compat_ipt_entry entrytable[0];
1874 };
1875
1876 static int
1877 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1878 void __user *userptr)
1879 {
1880 struct xt_counters *counters;
1881 const struct xt_table_info *private = table->private;
1882 void __user *pos;
1883 unsigned int size;
1884 int ret = 0;
1885 const void *loc_cpu_entry;
1886 unsigned int i = 0;
1887 struct ipt_entry *iter;
1888
1889 counters = alloc_counters(table);
1890 if (IS_ERR(counters))
1891 return PTR_ERR(counters);
1892
1893 /* choose the copy that is on our node/cpu, ...
1894 * This choice is lazy (because current thread is
1895 * allowed to migrate to another cpu)
1896 */
1897 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1898 pos = userptr;
1899 size = total_size;
1900 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1901 ret = compat_copy_entry_to_user(iter, &pos,
1902 &size, counters, i++);
1903 if (ret != 0)
1904 break;
1905 }
1906
1907 vfree(counters);
1908 return ret;
1909 }
1910
1911 static int
1912 compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
1913 int *len)
1914 {
1915 int ret;
1916 struct compat_ipt_get_entries get;
1917 struct xt_table *t;
1918
1919 if (*len < sizeof(get)) {
1920 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1921 return -EINVAL;
1922 }
1923
1924 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1925 return -EFAULT;
1926
1927 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1928 duprintf("compat_get_entries: %u != %zu\n",
1929 *len, sizeof(get) + get.size);
1930 return -EINVAL;
1931 }
1932
1933 xt_compat_lock(AF_INET);
1934 t = xt_find_table_lock(net, AF_INET, get.name);
1935 if (!IS_ERR_OR_NULL(t)) {
1936 const struct xt_table_info *private = t->private;
1937 struct xt_table_info info;
1938 duprintf("t->private->number = %u\n", private->number);
1939 ret = compat_table_info(private, &info);
1940 if (!ret && get.size == info.size) {
1941 ret = compat_copy_entries_to_user(private->size,
1942 t, uptr->entrytable);
1943 } else if (!ret) {
1944 duprintf("compat_get_entries: I've got %u not %u!\n",
1945 private->size, get.size);
1946 ret = -EAGAIN;
1947 }
1948 xt_compat_flush_offsets(AF_INET);
1949 module_put(t->me);
1950 xt_table_unlock(t);
1951 } else
1952 ret = t ? PTR_ERR(t) : -ENOENT;
1953
1954 xt_compat_unlock(AF_INET);
1955 return ret;
1956 }
1957
1958 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
1959
1960 static int
1961 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1962 {
1963 int ret;
1964
1965 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1966 return -EPERM;
1967
1968 switch (cmd) {
1969 case IPT_SO_GET_INFO:
1970 ret = get_info(sock_net(sk), user, len, 1);
1971 break;
1972 case IPT_SO_GET_ENTRIES:
1973 ret = compat_get_entries(sock_net(sk), user, len);
1974 break;
1975 default:
1976 ret = do_ipt_get_ctl(sk, cmd, user, len);
1977 }
1978 return ret;
1979 }
1980 #endif
1981
1982 static int
1983 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1984 {
1985 int ret;
1986
1987 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1988 return -EPERM;
1989
1990 switch (cmd) {
1991 case IPT_SO_SET_REPLACE:
1992 ret = do_replace(sock_net(sk), user, len);
1993 break;
1994
1995 case IPT_SO_SET_ADD_COUNTERS:
1996 ret = do_add_counters(sock_net(sk), user, len, 0);
1997 break;
1998
1999 default:
2000 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
2001 ret = -EINVAL;
2002 }
2003
2004 return ret;
2005 }
2006
2007 static int
2008 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2009 {
2010 int ret;
2011
2012 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2013 return -EPERM;
2014
2015 switch (cmd) {
2016 case IPT_SO_GET_INFO:
2017 ret = get_info(sock_net(sk), user, len, 0);
2018 break;
2019
2020 case IPT_SO_GET_ENTRIES:
2021 ret = get_entries(sock_net(sk), user, len);
2022 break;
2023
2024 case IPT_SO_GET_REVISION_MATCH:
2025 case IPT_SO_GET_REVISION_TARGET: {
2026 struct xt_get_revision rev;
2027 int target;
2028
2029 if (*len != sizeof(rev)) {
2030 ret = -EINVAL;
2031 break;
2032 }
2033 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2034 ret = -EFAULT;
2035 break;
2036 }
2037 rev.name[sizeof(rev.name)-1] = 0;
2038
2039 if (cmd == IPT_SO_GET_REVISION_TARGET)
2040 target = 1;
2041 else
2042 target = 0;
2043
2044 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2045 rev.revision,
2046 target, &ret),
2047 "ipt_%s", rev.name);
2048 break;
2049 }
2050
2051 default:
2052 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
2053 ret = -EINVAL;
2054 }
2055
2056 return ret;
2057 }
2058
2059 struct xt_table *ipt_register_table(struct net *net,
2060 const struct xt_table *table,
2061 const struct ipt_replace *repl)
2062 {
2063 int ret;
2064 struct xt_table_info *newinfo;
2065 struct xt_table_info bootstrap = {0};
2066 void *loc_cpu_entry;
2067 struct xt_table *new_table;
2068
2069 newinfo = xt_alloc_table_info(repl->size);
2070 if (!newinfo) {
2071 ret = -ENOMEM;
2072 goto out;
2073 }
2074
2075 /* choose the copy on our node/cpu, but don't care about preemption */
2076 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2077 memcpy(loc_cpu_entry, repl->entries, repl->size);
2078
2079 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2080 if (ret != 0)
2081 goto out_free;
2082
2083 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2084 if (IS_ERR(new_table)) {
2085 ret = PTR_ERR(new_table);
2086 goto out_free;
2087 }
2088
2089 return new_table;
2090
2091 out_free:
2092 xt_free_table_info(newinfo);
2093 out:
2094 return ERR_PTR(ret);
2095 }
2096
2097 void ipt_unregister_table(struct net *net, struct xt_table *table)
2098 {
2099 struct xt_table_info *private;
2100 void *loc_cpu_entry;
2101 struct module *table_owner = table->me;
2102 struct ipt_entry *iter;
2103
2104 private = xt_unregister_table(table);
2105
2106 /* Decrease module usage counts and free resources */
2107 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2108 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2109 cleanup_entry(iter, net);
2110 if (private->number > private->initial_entries)
2111 module_put(table_owner);
2112 xt_free_table_info(private);
2113 }
2114
2115 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2116 static inline bool
2117 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2118 u_int8_t type, u_int8_t code,
2119 bool invert)
2120 {
2121 return ((test_type == 0xFF) ||
2122 (type == test_type && code >= min_code && code <= max_code))
2123 ^ invert;
2124 }
2125
2126 static bool
2127 icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
2128 {
2129 const struct icmphdr *ic;
2130 struct icmphdr _icmph;
2131 const struct ipt_icmp *icmpinfo = par->matchinfo;
2132
2133 /* Must not be a fragment. */
2134 if (par->fragoff != 0)
2135 return false;
2136
2137 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2138 if (ic == NULL) {
2139 /* We've been asked to examine this packet, and we
2140 * can't. Hence, no choice but to drop.
2141 */
2142 duprintf("Dropping evil ICMP tinygram.\n");
2143 par->hotdrop = true;
2144 return false;
2145 }
2146
2147 return icmp_type_code_match(icmpinfo->type,
2148 icmpinfo->code[0],
2149 icmpinfo->code[1],
2150 ic->type, ic->code,
2151 !!(icmpinfo->invflags&IPT_ICMP_INV));
2152 }
2153
2154 static int icmp_checkentry(const struct xt_mtchk_param *par)
2155 {
2156 const struct ipt_icmp *icmpinfo = par->matchinfo;
2157
2158 /* Must specify no unknown invflags */
2159 return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
2160 }
2161
2162 static struct xt_target ipt_builtin_tg[] __read_mostly = {
2163 {
2164 .name = XT_STANDARD_TARGET,
2165 .targetsize = sizeof(int),
2166 .family = NFPROTO_IPV4,
2167 #ifdef CONFIG_COMPAT
2168 .compatsize = sizeof(compat_int_t),
2169 .compat_from_user = compat_standard_from_user,
2170 .compat_to_user = compat_standard_to_user,
2171 #endif
2172 },
2173 {
2174 .name = XT_ERROR_TARGET,
2175 .target = ipt_error,
2176 .targetsize = XT_FUNCTION_MAXNAMELEN,
2177 .family = NFPROTO_IPV4,
2178 },
2179 };
2180
2181 static struct nf_sockopt_ops ipt_sockopts = {
2182 .pf = PF_INET,
2183 .set_optmin = IPT_BASE_CTL,
2184 .set_optmax = IPT_SO_SET_MAX+1,
2185 .set = do_ipt_set_ctl,
2186 #ifdef CONFIG_COMPAT
2187 .compat_set = compat_do_ipt_set_ctl,
2188 #endif
2189 .get_optmin = IPT_BASE_CTL,
2190 .get_optmax = IPT_SO_GET_MAX+1,
2191 .get = do_ipt_get_ctl,
2192 #ifdef CONFIG_COMPAT
2193 .compat_get = compat_do_ipt_get_ctl,
2194 #endif
2195 .owner = THIS_MODULE,
2196 };
2197
2198 static struct xt_match ipt_builtin_mt[] __read_mostly = {
2199 {
2200 .name = "icmp",
2201 .match = icmp_match,
2202 .matchsize = sizeof(struct ipt_icmp),
2203 .checkentry = icmp_checkentry,
2204 .proto = IPPROTO_ICMP,
2205 .family = NFPROTO_IPV4,
2206 },
2207 };
2208
2209 static int __net_init ip_tables_net_init(struct net *net)
2210 {
2211 return xt_proto_init(net, NFPROTO_IPV4);
2212 }
2213
2214 static void __net_exit ip_tables_net_exit(struct net *net)
2215 {
2216 xt_proto_fini(net, NFPROTO_IPV4);
2217 }
2218
2219 static struct pernet_operations ip_tables_net_ops = {
2220 .init = ip_tables_net_init,
2221 .exit = ip_tables_net_exit,
2222 };
2223
2224 static int __init ip_tables_init(void)
2225 {
2226 int ret;
2227
2228 ret = register_pernet_subsys(&ip_tables_net_ops);
2229 if (ret < 0)
2230 goto err1;
2231
2232 /* No one else will be downing sem now, so we won't sleep */
2233 ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
2234 if (ret < 0)
2235 goto err2;
2236 ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
2237 if (ret < 0)
2238 goto err4;
2239
2240 /* Register setsockopt */
2241 ret = nf_register_sockopt(&ipt_sockopts);
2242 if (ret < 0)
2243 goto err5;
2244
2245 pr_info("(C) 2000-2006 Netfilter Core Team\n");
2246 return 0;
2247
2248 err5:
2249 xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
2250 err4:
2251 xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
2252 err2:
2253 unregister_pernet_subsys(&ip_tables_net_ops);
2254 err1:
2255 return ret;
2256 }
2257
2258 static void __exit ip_tables_fini(void)
2259 {
2260 nf_unregister_sockopt(&ipt_sockopts);
2261
2262 xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
2263 xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
2264 unregister_pernet_subsys(&ip_tables_net_ops);
2265 }
2266
2267 EXPORT_SYMBOL(ipt_register_table);
2268 EXPORT_SYMBOL(ipt_unregister_table);
2269 EXPORT_SYMBOL(ipt_do_table);
2270 module_init(ip_tables_init);
2271 module_exit(ip_tables_fini);