netfilter: x_tables: add compat version of xt_check_entry_offsets
net/ipv4/netfilter/ip_tables.c (from GitHub/mt8127/android_kernel_alcatel_ttab.git)
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (C) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/cache.h>
14 #include <linux/capability.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/icmp.h>
21 #include <net/ip.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28
29 #include <linux/netfilter/x_tables.h>
30 #include <linux/netfilter_ipv4/ip_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
33
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv4 packet filter");
37
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
41
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) pr_info(format , ## args)
44 #else
45 #define dprintf(format, args...)
46 #endif
47
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) pr_info(format , ## args)
50 #else
51 #define duprintf(format, args...)
52 #endif
53
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) WARN_ON(!(x))
56 #else
57 #define IP_NF_ASSERT(x)
58 #endif
59
60 #if 0
61 /* All the better to debug you with... */
62 #define static
63 #define inline
64 #endif
65
66 void *ipt_alloc_initial_table(const struct xt_table *info)
67 {
68 return xt_alloc_initial_table(ipt, IPT);
69 }
70 EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
71
72 /* Returns whether the packet matches the rule or not. */
73 /* Performance critical - called for every packet */
74 static inline bool
75 ip_packet_match(const struct iphdr *ip,
76 const char *indev,
77 const char *outdev,
78 const struct ipt_ip *ipinfo,
79 int isfrag)
80 {
81 unsigned long ret;
82
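/* XOR a match test with the rule's corresponding invert flag, so a
 * set IPT_INV_* bit flips the sense of that test. */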
83 #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
84
85 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
86 IPT_INV_SRCIP) ||
87 FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
88 IPT_INV_DSTIP)) {
89 dprintf("Source or dest mismatch.\n");
90
91 dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
92 &ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
93 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
94 dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
95 &ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
96 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
97 return false;
98 }
99
100 ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
101
102 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
103 dprintf("VIA in mismatch (%s vs %s).%s\n",
104 indev, ipinfo->iniface,
105 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
106 return false;
107 }
108
109 ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);
110
111 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
112 dprintf("VIA out mismatch (%s vs %s).%s\n",
113 outdev, ipinfo->outiface,
114 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
115 return false;
116 }
117
118 /* Check specific protocol */
119 if (ipinfo->proto &&
120 FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
121 dprintf("Packet protocol %hi does not match %hi.%s\n",
122 ip->protocol, ipinfo->proto,
123 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
124 return false;
125 }
126
127 /* If we have a fragment rule but the packet is not a fragment,
128 * then we return false */
129 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
130 dprintf("Fragment rule but not fragment.%s\n",
131 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
132 return false;
133 }
134
135 return true;
136 }
137
138 static bool
139 ip_checkentry(const struct ipt_ip *ip)
140 {
141 if (ip->flags & ~IPT_F_MASK) {
142 duprintf("Unknown flag bits set: %08X\n",
143 ip->flags & ~IPT_F_MASK);
144 return false;
145 }
146 if (ip->invflags & ~IPT_INV_MASK) {
147 duprintf("Unknown invflag bits set: %08X\n",
148 ip->invflags & ~IPT_INV_MASK);
149 return false;
150 }
151 return true;
152 }
153
154 static unsigned int
155 ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
156 {
157 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
158
159 return NF_DROP;
160 }
161
162 /* Performance critical */
163 static inline struct ipt_entry *
164 get_entry(const void *base, unsigned int offset)
165 {
166 return (struct ipt_entry *)(base + offset);
167 }
168
169 /* All zeroes == unconditional rule. */
170 /* Mildly perf critical (only if packet tracing is on) */
171 static inline bool unconditional(const struct ipt_entry *e)
172 {
173 static const struct ipt_ip uncond;
174
175 return e->target_offset == sizeof(struct ipt_entry) &&
176 memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
177 #undef FWINV
178 }
179
180 /* for const-correctness */
181 static inline const struct xt_entry_target *
182 ipt_get_target_c(const struct ipt_entry *e)
183 {
184 return ipt_get_target((struct ipt_entry *)e);
185 }
186
187 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
188 static const char *const hooknames[] = {
189 [NF_INET_PRE_ROUTING] = "PREROUTING",
190 [NF_INET_LOCAL_IN] = "INPUT",
191 [NF_INET_FORWARD] = "FORWARD",
192 [NF_INET_LOCAL_OUT] = "OUTPUT",
193 [NF_INET_POST_ROUTING] = "POSTROUTING",
194 };
195
196 enum nf_ip_trace_comments {
197 NF_IP_TRACE_COMMENT_RULE,
198 NF_IP_TRACE_COMMENT_RETURN,
199 NF_IP_TRACE_COMMENT_POLICY,
200 };
201
202 static const char *const comments[] = {
203 [NF_IP_TRACE_COMMENT_RULE] = "rule",
204 [NF_IP_TRACE_COMMENT_RETURN] = "return",
205 [NF_IP_TRACE_COMMENT_POLICY] = "policy",
206 };
207
208 static struct nf_loginfo trace_loginfo = {
209 .type = NF_LOG_TYPE_LOG,
210 .u = {
211 .log = {
212 .level = 4,
213 .logflags = NF_LOG_MASK,
214 },
215 },
216 };
217
218 /* Mildly perf critical (only if packet tracing is on) */
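/* Walks the chain looking for the traced entry 'e': user chain names
 * come from the ERROR targets that head them, and the rule number is
 * counted along the way. Returns 1 once 'e' is reached. */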
219 static inline int
220 get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
221 const char *hookname, const char **chainname,
222 const char **comment, unsigned int *rulenum)
223 {
224 const struct xt_standard_target *t = (void *)ipt_get_target_c(s);
225
226 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
227 /* Head of user chain: ERROR target with chainname */
228 *chainname = t->target.data;
229 (*rulenum) = 0;
230 } else if (s == e) {
231 (*rulenum)++;
232
233 if (unconditional(s) &&
234 strcmp(t->target.u.kernel.target->name,
235 XT_STANDARD_TARGET) == 0 &&
236 t->verdict < 0) {
237 /* Tail of chains: STANDARD target (return/policy) */
238 *comment = *chainname == hookname
239 ? comments[NF_IP_TRACE_COMMENT_POLICY]
240 : comments[NF_IP_TRACE_COMMENT_RETURN];
241 }
242 return 1;
243 } else
244 (*rulenum)++;
245
246 return 0;
247 }
248
249 static void trace_packet(const struct sk_buff *skb,
250 unsigned int hook,
251 const struct net_device *in,
252 const struct net_device *out,
253 const char *tablename,
254 const struct xt_table_info *private,
255 const struct ipt_entry *e)
256 {
257 const void *table_base;
258 const struct ipt_entry *root;
259 const char *hookname, *chainname, *comment;
260 const struct ipt_entry *iter;
261 unsigned int rulenum = 0;
262 struct net *net = dev_net(in ? in : out);
263
264 table_base = private->entries[smp_processor_id()];
265 root = get_entry(table_base, private->hook_entry[hook]);
266
267 hookname = chainname = hooknames[hook];
268 comment = comments[NF_IP_TRACE_COMMENT_RULE];
269
270 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
271 if (get_chainname_rulenum(iter, e, hookname,
272 &chainname, &comment, &rulenum) != 0)
273 break;
274
275 nf_log_packet(net, AF_INET, hook, skb, in, out, &trace_loginfo,
276 "TRACE: %s:%s:%s:%u ",
277 tablename, chainname, comment, rulenum);
278 }
279 #endif
280
281 static inline __pure
282 struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
283 {
284 return (void *)entry + entry->next_offset;
285 }
286
287 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
288 unsigned int
289 ipt_do_table(struct sk_buff *skb,
290 unsigned int hook,
291 const struct net_device *in,
292 const struct net_device *out,
293 struct xt_table *table)
294 {
295 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
296 const struct iphdr *ip;
297 /* Initializing verdict to NF_DROP keeps gcc happy. */
298 unsigned int verdict = NF_DROP;
299 const char *indev, *outdev;
300 const void *table_base;
301 struct ipt_entry *e, **jumpstack;
302 unsigned int *stackptr, origptr, cpu;
303 const struct xt_table_info *private;
304 struct xt_action_param acpar;
305 unsigned int addend;
306
307 /* Initialization */
308 ip = ip_hdr(skb);
309 indev = in ? in->name : nulldevname;
310 outdev = out ? out->name : nulldevname;
311 /* We handle fragments by dealing with the first fragment as
312 * if it was a normal packet. All other fragments are treated
313 * normally, except that they will NEVER match rules that ask
314 * things we don't know, i.e. tcp syn flag or ports. If the
315 * rule is also a fragment-specific rule, non-fragments won't
316 * match it. */
317 acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
318 acpar.thoff = ip_hdrlen(skb);
319 acpar.hotdrop = false;
320 acpar.in = in;
321 acpar.out = out;
322 acpar.family = NFPROTO_IPV4;
323 acpar.hooknum = hook;
324
325 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
326 local_bh_disable();
327 addend = xt_write_recseq_begin();
328 private = table->private;
329 cpu = smp_processor_id();
330 table_base = private->entries[cpu];
331 jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
332 stackptr = per_cpu_ptr(private->stackptr, cpu);
333 origptr = *stackptr;
334
335 e = get_entry(table_base, private->hook_entry[hook]);
336
337 pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
338 table->name, hook, origptr,
339 get_entry(table_base, private->underflow[hook]));
340
341 do {
342 const struct xt_entry_target *t;
343 const struct xt_entry_match *ematch;
344
345 IP_NF_ASSERT(e);
346 if (!ip_packet_match(ip, indev, outdev,
347 &e->ip, acpar.fragoff)) {
348 no_match:
349 e = ipt_next_entry(e);
350 continue;
351 }
352
353 xt_ematch_foreach(ematch, e) {
354 acpar.match = ematch->u.kernel.match;
355 acpar.matchinfo = ematch->data;
356 if (!acpar.match->match(skb, &acpar))
357 goto no_match;
358 }
359
360 ADD_COUNTER(e->counters, skb->len, 1);
361
362 t = ipt_get_target(e);
363 IP_NF_ASSERT(t->u.kernel.target);
364
365 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
366 /* The packet is traced: log it */
367 if (unlikely(skb->nf_trace))
368 trace_packet(skb, hook, in, out,
369 table->name, private, e);
370 #endif
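/* A NULL target function means a standard target: its verdict is
 * either XT_RETURN (pop the jump stack), an absolute verdict encoded
 * as -(verdict) - 1, or a non-negative offset to jump to. */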
371 /* Standard target? */
372 if (!t->u.kernel.target->target) {
373 int v;
374
375 v = ((struct xt_standard_target *)t)->verdict;
376 if (v < 0) {
377 /* Pop from stack? */
378 if (v != XT_RETURN) {
379 verdict = (unsigned int)(-v) - 1;
380 break;
381 }
382 if (*stackptr <= origptr) {
383 e = get_entry(table_base,
384 private->underflow[hook]);
385 pr_debug("Underflow (this is normal) "
386 "to %p\n", e);
387 } else {
388 e = jumpstack[--*stackptr];
389 pr_debug("Pulled %p out from pos %u\n",
390 e, *stackptr);
391 e = ipt_next_entry(e);
392 }
393 continue;
394 }
395 if (table_base + v != ipt_next_entry(e) &&
396 !(e->ip.flags & IPT_F_GOTO)) {
397 if (*stackptr >= private->stacksize) {
398 verdict = NF_DROP;
399 break;
400 }
401 jumpstack[(*stackptr)++] = e;
402 pr_debug("Pushed %p into pos %u\n",
403 e, *stackptr - 1);
404 }
405
406 e = get_entry(table_base, v);
407 continue;
408 }
409
410 acpar.target = t->u.kernel.target;
411 acpar.targinfo = t->data;
412
413 verdict = t->u.kernel.target->target(skb, &acpar);
414 /* Target might have changed stuff. */
415 ip = ip_hdr(skb);
416 if (verdict == XT_CONTINUE)
417 e = ipt_next_entry(e);
418 else
419 /* Verdict */
420 break;
421 } while (!acpar.hotdrop);
422 pr_debug("Exiting %s; resetting sp from %u to %u\n",
423 __func__, *stackptr, origptr);
424 *stackptr = origptr;
425 xt_write_recseq_end(addend);
426 local_bh_enable();
427
428 #ifdef DEBUG_ALLOW_ALL
429 return NF_ACCEPT;
430 #else
431 if (acpar.hotdrop)
432 return NF_DROP;
433 else return verdict;
434 #endif
435 }
436
437 /* Figures out from what hook each rule can be called: returns 0 if
438 there are loops. Puts hook bitmask in comefrom. */
439 static int
440 mark_source_chains(const struct xt_table_info *newinfo,
441 unsigned int valid_hooks, void *entry0)
442 {
443 unsigned int hook;
444
445 /* No recursion; use packet counter to save back ptrs (reset
446 to 0 as we leave), and comefrom to save source hook bitmask */
447 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
448 unsigned int pos = newinfo->hook_entry[hook];
449 struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);
450
451 if (!(valid_hooks & (1 << hook)))
452 continue;
453
454 /* Set initial back pointer. */
455 e->counters.pcnt = pos;
456
457 for (;;) {
458 const struct xt_standard_target *t
459 = (void *)ipt_get_target_c(e);
460 int visited = e->comefrom & (1 << hook);
461
462 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
463 pr_err("iptables: loop hook %u pos %u %08X.\n",
464 hook, pos, e->comefrom);
465 return 0;
466 }
467 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
468
469 /* Unconditional return/END. */
470 if ((unconditional(e) &&
471 (strcmp(t->target.u.user.name,
472 XT_STANDARD_TARGET) == 0) &&
473 t->verdict < 0) || visited) {
474 unsigned int oldpos, size;
475
476 if ((strcmp(t->target.u.user.name,
477 XT_STANDARD_TARGET) == 0) &&
478 t->verdict < -NF_MAX_VERDICT - 1) {
479 duprintf("mark_source_chains: bad "
480 "negative verdict (%i)\n",
481 t->verdict);
482 return 0;
483 }
484
485 /* Return: backtrack through the last
486 big jump. */
487 do {
488 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
489 #ifdef DEBUG_IP_FIREWALL_USER
490 if (e->comefrom
491 & (1 << NF_INET_NUMHOOKS)) {
492 duprintf("Back unset "
493 "on hook %u "
494 "rule %u\n",
495 hook, pos);
496 }
497 #endif
498 oldpos = pos;
499 pos = e->counters.pcnt;
500 e->counters.pcnt = 0;
501
502 /* We're at the start. */
503 if (pos == oldpos)
504 goto next;
505
506 e = (struct ipt_entry *)
507 (entry0 + pos);
508 } while (oldpos == pos + e->next_offset);
509
510 /* Move along one */
511 size = e->next_offset;
512 e = (struct ipt_entry *)
513 (entry0 + pos + size);
514 if (pos + size >= newinfo->size)
515 return 0;
516 e->counters.pcnt = pos;
517 pos += size;
518 } else {
519 int newpos = t->verdict;
520
521 if (strcmp(t->target.u.user.name,
522 XT_STANDARD_TARGET) == 0 &&
523 newpos >= 0) {
524 if (newpos > newinfo->size -
525 sizeof(struct ipt_entry)) {
526 duprintf("mark_source_chains: "
527 "bad verdict (%i)\n",
528 newpos);
529 return 0;
530 }
531 /* This is a jump; chase it. */
532 duprintf("Jump rule %u -> %u\n",
533 pos, newpos);
534 } else {
535 /* ... this is a fallthru */
536 newpos = pos + e->next_offset;
537 if (newpos >= newinfo->size)
538 return 0;
539 }
540 e = (struct ipt_entry *)
541 (entry0 + newpos);
542 e->counters.pcnt = pos;
543 pos = newpos;
544 }
545 }
546 next:
547 duprintf("Finished chain %u\n", hook);
548 }
549 return 1;
550 }
551
552 static void cleanup_match(struct xt_entry_match *m, struct net *net)
553 {
554 struct xt_mtdtor_param par;
555
556 par.net = net;
557 par.match = m->u.kernel.match;
558 par.matchinfo = m->data;
559 par.family = NFPROTO_IPV4;
560 if (par.match->destroy != NULL)
561 par.match->destroy(&par);
562 module_put(par.match->me);
563 }
564
565 static int
566 check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
567 {
568 const struct ipt_ip *ip = par->entryinfo;
569 int ret;
570
571 par->match = m->u.kernel.match;
572 par->matchinfo = m->data;
573
574 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
575 ip->proto, ip->invflags & IPT_INV_PROTO);
576 if (ret < 0) {
577 duprintf("check failed for `%s'.\n", par->match->name);
578 return ret;
579 }
580 return 0;
581 }
582
583 static int
584 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
585 {
586 struct xt_match *match;
587 int ret;
588
589 match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
590 m->u.user.revision);
591 if (IS_ERR(match)) {
592 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
593 return PTR_ERR(match);
594 }
595 m->u.kernel.match = match;
596
597 ret = check_match(m, par);
598 if (ret)
599 goto err;
600
601 return 0;
602 err:
603 module_put(m->u.kernel.match->me);
604 return ret;
605 }
606
607 static int check_target(struct ipt_entry *e, struct net *net, const char *name)
608 {
609 struct xt_entry_target *t = ipt_get_target(e);
610 struct xt_tgchk_param par = {
611 .net = net,
612 .table = name,
613 .entryinfo = e,
614 .target = t->u.kernel.target,
615 .targinfo = t->data,
616 .hook_mask = e->comefrom,
617 .family = NFPROTO_IPV4,
618 };
619 int ret;
620
621 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
622 e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
623 if (ret < 0) {
624 duprintf("check failed for `%s'.\n",
625 t->u.kernel.target->name);
626 return ret;
627 }
628 return 0;
629 }
630
631 static int
632 find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
633 unsigned int size)
634 {
635 struct xt_entry_target *t;
636 struct xt_target *target;
637 int ret;
638 unsigned int j;
639 struct xt_mtchk_param mtpar;
640 struct xt_entry_match *ematch;
641
642 j = 0;
643 mtpar.net = net;
644 mtpar.table = name;
645 mtpar.entryinfo = &e->ip;
646 mtpar.hook_mask = e->comefrom;
647 mtpar.family = NFPROTO_IPV4;
648 xt_ematch_foreach(ematch, e) {
649 ret = find_check_match(ematch, &mtpar);
650 if (ret != 0)
651 goto cleanup_matches;
652 ++j;
653 }
654
655 t = ipt_get_target(e);
656 target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
657 t->u.user.revision);
658 if (IS_ERR(target)) {
659 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
660 ret = PTR_ERR(target);
661 goto cleanup_matches;
662 }
663 t->u.kernel.target = target;
664
665 ret = check_target(e, net, name);
666 if (ret)
667 goto err;
668 return 0;
669 err:
670 module_put(t->u.kernel.target->me);
671 cleanup_matches:
672 xt_ematch_foreach(ematch, e) {
673 if (j-- == 0)
674 break;
675 cleanup_match(ematch, net);
676 }
677 return ret;
678 }
679
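/* Base chain policies (underflow entries) must be unconditional
 * STANDARD verdicts of ACCEPT or DROP, so every packet that falls off
 * the end of a chain gets a verdict. */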
680 static bool check_underflow(const struct ipt_entry *e)
681 {
682 const struct xt_entry_target *t;
683 unsigned int verdict;
684
685 if (!unconditional(e))
686 return false;
687 t = ipt_get_target_c(e);
688 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
689 return false;
690 verdict = ((struct xt_standard_target *)t)->verdict;
691 verdict = -verdict - 1;
692 return verdict == NF_DROP || verdict == NF_ACCEPT;
693 }
694
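/* Validate one rule's alignment and offsets against the blob limits,
 * then record any hook entry point or underflow that starts here. */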
695 static int
696 check_entry_size_and_hooks(struct ipt_entry *e,
697 struct xt_table_info *newinfo,
698 const unsigned char *base,
699 const unsigned char *limit,
700 const unsigned int *hook_entries,
701 const unsigned int *underflows,
702 unsigned int valid_hooks)
703 {
704 unsigned int h;
705 int err;
706
707 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
708 (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
709 (unsigned char *)e + e->next_offset > limit) {
710 duprintf("Bad offset %p\n", e);
711 return -EINVAL;
712 }
713
714 if (e->next_offset
715 < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
716 duprintf("checking: element %p size %u\n",
717 e, e->next_offset);
718 return -EINVAL;
719 }
720
721 if (!ip_checkentry(&e->ip))
722 return -EINVAL;
723
724 err = xt_check_entry_offsets(e, e->target_offset, e->next_offset);
725 if (err)
726 return err;
727
728 /* Check hooks & underflows */
729 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
730 if (!(valid_hooks & (1 << h)))
731 continue;
732 if ((unsigned char *)e - base == hook_entries[h])
733 newinfo->hook_entry[h] = hook_entries[h];
734 if ((unsigned char *)e - base == underflows[h]) {
735 if (!check_underflow(e)) {
736 pr_debug("Underflows must be unconditional and "
737 "use the STANDARD target with "
738 "ACCEPT/DROP\n");
739 return -EINVAL;
740 }
741 newinfo->underflow[h] = underflows[h];
742 }
743 }
744
745 /* Clear counters and comefrom */
746 e->counters = ((struct xt_counters) { 0, 0 });
747 e->comefrom = 0;
748 return 0;
749 }
750
751 static void
752 cleanup_entry(struct ipt_entry *e, struct net *net)
753 {
754 struct xt_tgdtor_param par;
755 struct xt_entry_target *t;
756 struct xt_entry_match *ematch;
757
758 /* Cleanup all matches */
759 xt_ematch_foreach(ematch, e)
760 cleanup_match(ematch, net);
761 t = ipt_get_target(e);
762
763 par.net = net;
764 par.target = t->u.kernel.target;
765 par.targinfo = t->data;
766 par.family = NFPROTO_IPV4;
767 if (par.target->destroy != NULL)
768 par.target->destroy(&par);
769 module_put(par.target->me);
770 }
771
772 /* Checks and translates the user-supplied table segment (held in
773 newinfo) */
774 static int
775 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
776 const struct ipt_replace *repl)
777 {
778 struct ipt_entry *iter;
779 unsigned int i;
780 int ret = 0;
781
782 newinfo->size = repl->size;
783 newinfo->number = repl->num_entries;
784
785 /* Init all hooks to impossible value. */
786 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
787 newinfo->hook_entry[i] = 0xFFFFFFFF;
788 newinfo->underflow[i] = 0xFFFFFFFF;
789 }
790
791 duprintf("translate_table: size %u\n", newinfo->size);
792 i = 0;
793 /* Walk through entries, checking offsets. */
794 xt_entry_foreach(iter, entry0, newinfo->size) {
795 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
796 entry0 + repl->size,
797 repl->hook_entry,
798 repl->underflow,
799 repl->valid_hooks);
800 if (ret != 0)
801 return ret;
802 ++i;
803 if (strcmp(ipt_get_target(iter)->u.user.name,
804 XT_ERROR_TARGET) == 0)
805 ++newinfo->stacksize;
806 }
807
808 if (i != repl->num_entries) {
809 duprintf("translate_table: %u not %u entries\n",
810 i, repl->num_entries);
811 return -EINVAL;
812 }
813
814 /* Check hooks all assigned */
815 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
816 /* Only hooks which are valid */
817 if (!(repl->valid_hooks & (1 << i)))
818 continue;
819 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
820 duprintf("Invalid hook entry %u %u\n",
821 i, repl->hook_entry[i]);
822 return -EINVAL;
823 }
824 if (newinfo->underflow[i] == 0xFFFFFFFF) {
825 duprintf("Invalid underflow %u %u\n",
826 i, repl->underflow[i]);
827 return -EINVAL;
828 }
829 }
830
831 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
832 return -ELOOP;
833
834 /* Finally, each sanity check must pass */
835 i = 0;
836 xt_entry_foreach(iter, entry0, newinfo->size) {
837 ret = find_check_entry(iter, net, repl->name, repl->size);
838 if (ret != 0)
839 break;
840 ++i;
841 }
842
843 if (ret != 0) {
844 xt_entry_foreach(iter, entry0, newinfo->size) {
845 if (i-- == 0)
846 break;
847 cleanup_entry(iter, net);
848 }
849 return ret;
850 }
851
852 /* And one copy for every other CPU */
853 for_each_possible_cpu(i) {
854 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
855 memcpy(newinfo->entries[i], entry0, newinfo->size);
856 }
857
858 return ret;
859 }
860
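/* Sum the per-cpu counters into 'counters', reading each pair under
 * the xt_recseq sequence counter so updates in progress on other cpus
 * don't yield torn values. */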
861 static void
862 get_counters(const struct xt_table_info *t,
863 struct xt_counters counters[])
864 {
865 struct ipt_entry *iter;
866 unsigned int cpu;
867 unsigned int i;
868
869 for_each_possible_cpu(cpu) {
870 seqcount_t *s = &per_cpu(xt_recseq, cpu);
871
872 i = 0;
873 xt_entry_foreach(iter, t->entries[cpu], t->size) {
874 u64 bcnt, pcnt;
875 unsigned int start;
876
877 do {
878 start = read_seqcount_begin(s);
879 bcnt = iter->counters.bcnt;
880 pcnt = iter->counters.pcnt;
881 } while (read_seqcount_retry(s, start));
882
883 ADD_COUNTER(counters[i], bcnt, pcnt);
884 ++i; /* macro does multi eval of i */
885 }
886 }
887 }
888
889 static struct xt_counters *alloc_counters(const struct xt_table *table)
890 {
891 unsigned int countersize;
892 struct xt_counters *counters;
893 const struct xt_table_info *private = table->private;
894
895 /* We need an atomic snapshot of the counters: the rest doesn't change
896 (other than comefrom, which userspace doesn't care
897 about). */
898 countersize = sizeof(struct xt_counters) * private->number;
899 counters = vzalloc(countersize);
900
901 if (counters == NULL)
902 return ERR_PTR(-ENOMEM);
903
904 get_counters(private, counters);
905
906 return counters;
907 }
908
909 static int
910 copy_entries_to_user(unsigned int total_size,
911 const struct xt_table *table,
912 void __user *userptr)
913 {
914 unsigned int off, num;
915 const struct ipt_entry *e;
916 struct xt_counters *counters;
917 const struct xt_table_info *private = table->private;
918 int ret = 0;
919 const void *loc_cpu_entry;
920
921 counters = alloc_counters(table);
922 if (IS_ERR(counters))
923 return PTR_ERR(counters);
924
925 /* choose the copy that is on our node/cpu, ...
926 * This choice is lazy (because the current thread is
927 * allowed to migrate to another cpu)
928 */
929 loc_cpu_entry = private->entries[raw_smp_processor_id()];
930 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
931 ret = -EFAULT;
932 goto free_counters;
933 }
934
935 /* FIXME: use iterator macros --RR */
936 /* ... then go back and fix counters and names */
937 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
938 unsigned int i;
939 const struct xt_entry_match *m;
940 const struct xt_entry_target *t;
941
942 e = (struct ipt_entry *)(loc_cpu_entry + off);
943 if (copy_to_user(userptr + off
944 + offsetof(struct ipt_entry, counters),
945 &counters[num],
946 sizeof(counters[num])) != 0) {
947 ret = -EFAULT;
948 goto free_counters;
949 }
950
951 for (i = sizeof(struct ipt_entry);
952 i < e->target_offset;
953 i += m->u.match_size) {
954 m = (void *)e + i;
955
956 if (copy_to_user(userptr + off + i
957 + offsetof(struct xt_entry_match,
958 u.user.name),
959 m->u.kernel.match->name,
960 strlen(m->u.kernel.match->name)+1)
961 != 0) {
962 ret = -EFAULT;
963 goto free_counters;
964 }
965 }
966
967 t = ipt_get_target_c(e);
968 if (copy_to_user(userptr + off + e->target_offset
969 + offsetof(struct xt_entry_target,
970 u.user.name),
971 t->u.kernel.target->name,
972 strlen(t->u.kernel.target->name)+1) != 0) {
973 ret = -EFAULT;
974 goto free_counters;
975 }
976 }
977
978 free_counters:
979 vfree(counters);
980 return ret;
981 }
982
983 #ifdef CONFIG_COMPAT
984 static void compat_standard_from_user(void *dst, const void *src)
985 {
986 int v = *(compat_int_t *)src;
987
988 if (v > 0)
989 v += xt_compat_calc_jump(AF_INET, v);
990 memcpy(dst, &v, sizeof(v));
991 }
992
993 static int compat_standard_to_user(void __user *dst, const void *src)
994 {
995 compat_int_t cv = *(int *)src;
996
997 if (cv > 0)
998 cv -= xt_compat_calc_jump(AF_INET, cv);
999 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1000 }
1001
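/* Work out how much smaller this entry is in the 32-bit compat layout
 * (entry header plus each match and the target), record the delta for
 * later offset translation and shift hook entries/underflows that lie
 * beyond it. */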
1002 static int compat_calc_entry(const struct ipt_entry *e,
1003 const struct xt_table_info *info,
1004 const void *base, struct xt_table_info *newinfo)
1005 {
1006 const struct xt_entry_match *ematch;
1007 const struct xt_entry_target *t;
1008 unsigned int entry_offset;
1009 int off, i, ret;
1010
1011 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1012 entry_offset = (void *)e - base;
1013 xt_ematch_foreach(ematch, e)
1014 off += xt_compat_match_offset(ematch->u.kernel.match);
1015 t = ipt_get_target_c(e);
1016 off += xt_compat_target_offset(t->u.kernel.target);
1017 newinfo->size -= off;
1018 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
1019 if (ret)
1020 return ret;
1021
1022 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1023 if (info->hook_entry[i] &&
1024 (e < (struct ipt_entry *)(base + info->hook_entry[i])))
1025 newinfo->hook_entry[i] -= off;
1026 if (info->underflow[i] &&
1027 (e < (struct ipt_entry *)(base + info->underflow[i])))
1028 newinfo->underflow[i] -= off;
1029 }
1030 return 0;
1031 }
1032
1033 static int compat_table_info(const struct xt_table_info *info,
1034 struct xt_table_info *newinfo)
1035 {
1036 struct ipt_entry *iter;
1037 void *loc_cpu_entry;
1038 int ret;
1039
1040 if (!newinfo || !info)
1041 return -EINVAL;
1042
1043 /* we don't care about newinfo->entries[] */
1044 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1045 newinfo->initial_entries = 0;
1046 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1047 xt_compat_init_offsets(AF_INET, info->number);
1048 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1049 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1050 if (ret != 0)
1051 return ret;
1052 }
1053 return 0;
1054 }
1055 #endif
1056
1057 static int get_info(struct net *net, void __user *user,
1058 const int *len, int compat)
1059 {
1060 char name[XT_TABLE_MAXNAMELEN];
1061 struct xt_table *t;
1062 int ret;
1063
1064 if (*len != sizeof(struct ipt_getinfo)) {
1065 duprintf("length %u != %zu\n", *len,
1066 sizeof(struct ipt_getinfo));
1067 return -EINVAL;
1068 }
1069
1070 if (copy_from_user(name, user, sizeof(name)) != 0)
1071 return -EFAULT;
1072
1073 name[XT_TABLE_MAXNAMELEN-1] = '\0';
1074 #ifdef CONFIG_COMPAT
1075 if (compat)
1076 xt_compat_lock(AF_INET);
1077 #endif
1078 t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
1079 "iptable_%s", name);
1080 if (!IS_ERR_OR_NULL(t)) {
1081 struct ipt_getinfo info;
1082 const struct xt_table_info *private = t->private;
1083 #ifdef CONFIG_COMPAT
1084 struct xt_table_info tmp;
1085
1086 if (compat) {
1087 ret = compat_table_info(private, &tmp);
1088 xt_compat_flush_offsets(AF_INET);
1089 private = &tmp;
1090 }
1091 #endif
1092 memset(&info, 0, sizeof(info));
1093 info.valid_hooks = t->valid_hooks;
1094 memcpy(info.hook_entry, private->hook_entry,
1095 sizeof(info.hook_entry));
1096 memcpy(info.underflow, private->underflow,
1097 sizeof(info.underflow));
1098 info.num_entries = private->number;
1099 info.size = private->size;
1100 strcpy(info.name, name);
1101
1102 if (copy_to_user(user, &info, *len) != 0)
1103 ret = -EFAULT;
1104 else
1105 ret = 0;
1106
1107 xt_table_unlock(t);
1108 module_put(t->me);
1109 } else
1110 ret = t ? PTR_ERR(t) : -ENOENT;
1111 #ifdef CONFIG_COMPAT
1112 if (compat)
1113 xt_compat_unlock(AF_INET);
1114 #endif
1115 return ret;
1116 }
1117
1118 static int
1119 get_entries(struct net *net, struct ipt_get_entries __user *uptr,
1120 const int *len)
1121 {
1122 int ret;
1123 struct ipt_get_entries get;
1124 struct xt_table *t;
1125
1126 if (*len < sizeof(get)) {
1127 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1128 return -EINVAL;
1129 }
1130 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1131 return -EFAULT;
1132 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1133 duprintf("get_entries: %u != %zu\n",
1134 *len, sizeof(get) + get.size);
1135 return -EINVAL;
1136 }
1137
1138 t = xt_find_table_lock(net, AF_INET, get.name);
1139 if (!IS_ERR_OR_NULL(t)) {
1140 const struct xt_table_info *private = t->private;
1141 duprintf("t->private->number = %u\n", private->number);
1142 if (get.size == private->size)
1143 ret = copy_entries_to_user(private->size,
1144 t, uptr->entrytable);
1145 else {
1146 duprintf("get_entries: I've got %u not %u!\n",
1147 private->size, get.size);
1148 ret = -EAGAIN;
1149 }
1150 module_put(t->me);
1151 xt_table_unlock(t);
1152 } else
1153 ret = t ? PTR_ERR(t) : -ENOENT;
1154
1155 return ret;
1156 }
1157
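/* Swap the new table into place, then drain the old one: copy its
 * counters out to userspace, run the destructors on its entries and
 * free it. */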
1158 static int
1159 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1160 struct xt_table_info *newinfo, unsigned int num_counters,
1161 void __user *counters_ptr)
1162 {
1163 int ret;
1164 struct xt_table *t;
1165 struct xt_table_info *oldinfo;
1166 struct xt_counters *counters;
1167 void *loc_cpu_old_entry;
1168 struct ipt_entry *iter;
1169
1170 ret = 0;
1171 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1172 if (!counters) {
1173 ret = -ENOMEM;
1174 goto out;
1175 }
1176
1177 t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
1178 "iptable_%s", name);
1179 if (IS_ERR_OR_NULL(t)) {
1180 ret = t ? PTR_ERR(t) : -ENOENT;
1181 goto free_newinfo_counters_untrans;
1182 }
1183
1184 /* You lied! */
1185 if (valid_hooks != t->valid_hooks) {
1186 duprintf("Valid hook crap: %08X vs %08X\n",
1187 valid_hooks, t->valid_hooks);
1188 ret = -EINVAL;
1189 goto put_module;
1190 }
1191
1192 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1193 if (!oldinfo)
1194 goto put_module;
1195
1196 /* Update module usage count based on number of rules */
1197 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1198 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1199 if ((oldinfo->number > oldinfo->initial_entries) ||
1200 (newinfo->number <= oldinfo->initial_entries))
1201 module_put(t->me);
1202 if ((oldinfo->number > oldinfo->initial_entries) &&
1203 (newinfo->number <= oldinfo->initial_entries))
1204 module_put(t->me);
1205
1206 /* Get the old counters, and synchronize with replace */
1207 get_counters(oldinfo, counters);
1208
1209 /* Decrease module usage counts and free resource */
1210 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1211 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1212 cleanup_entry(iter, net);
1213
1214 xt_free_table_info(oldinfo);
1215 if (copy_to_user(counters_ptr, counters,
1216 sizeof(struct xt_counters) * num_counters) != 0) {
1217 /* Silent error, can't fail, new table is already in place */
1218 net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
1219 }
1220 vfree(counters);
1221 xt_table_unlock(t);
1222 return ret;
1223
1224 put_module:
1225 module_put(t->me);
1226 xt_table_unlock(t);
1227 free_newinfo_counters_untrans:
1228 vfree(counters);
1229 out:
1230 return ret;
1231 }
1232
1233 static int
1234 do_replace(struct net *net, const void __user *user, unsigned int len)
1235 {
1236 int ret;
1237 struct ipt_replace tmp;
1238 struct xt_table_info *newinfo;
1239 void *loc_cpu_entry;
1240 struct ipt_entry *iter;
1241
1242 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1243 return -EFAULT;
1244
1245 /* overflow check */
1246 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1247 return -ENOMEM;
1248 tmp.name[sizeof(tmp.name)-1] = 0;
1249
1250 newinfo = xt_alloc_table_info(tmp.size);
1251 if (!newinfo)
1252 return -ENOMEM;
1253
1254 /* choose the copy that is on our node/cpu */
1255 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1256 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1257 tmp.size) != 0) {
1258 ret = -EFAULT;
1259 goto free_newinfo;
1260 }
1261
1262 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1263 if (ret != 0)
1264 goto free_newinfo;
1265
1266 duprintf("Translated table\n");
1267
1268 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1269 tmp.num_counters, tmp.counters);
1270 if (ret)
1271 goto free_newinfo_untrans;
1272 return 0;
1273
1274 free_newinfo_untrans:
1275 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1276 cleanup_entry(iter, net);
1277 free_newinfo:
1278 xt_free_table_info(newinfo);
1279 return ret;
1280 }
1281
1282 static int
1283 do_add_counters(struct net *net, const void __user *user,
1284 unsigned int len, int compat)
1285 {
1286 unsigned int i, curcpu;
1287 struct xt_counters_info tmp;
1288 struct xt_counters *paddc;
1289 unsigned int num_counters;
1290 const char *name;
1291 int size;
1292 void *ptmp;
1293 struct xt_table *t;
1294 const struct xt_table_info *private;
1295 int ret = 0;
1296 void *loc_cpu_entry;
1297 struct ipt_entry *iter;
1298 unsigned int addend;
1299 #ifdef CONFIG_COMPAT
1300 struct compat_xt_counters_info compat_tmp;
1301
1302 if (compat) {
1303 ptmp = &compat_tmp;
1304 size = sizeof(struct compat_xt_counters_info);
1305 } else
1306 #endif
1307 {
1308 ptmp = &tmp;
1309 size = sizeof(struct xt_counters_info);
1310 }
1311
1312 if (copy_from_user(ptmp, user, size) != 0)
1313 return -EFAULT;
1314
1315 #ifdef CONFIG_COMPAT
1316 if (compat) {
1317 num_counters = compat_tmp.num_counters;
1318 name = compat_tmp.name;
1319 } else
1320 #endif
1321 {
1322 num_counters = tmp.num_counters;
1323 name = tmp.name;
1324 }
1325
1326 if (len != size + num_counters * sizeof(struct xt_counters))
1327 return -EINVAL;
1328
1329 paddc = vmalloc(len - size);
1330 if (!paddc)
1331 return -ENOMEM;
1332
1333 if (copy_from_user(paddc, user + size, len - size) != 0) {
1334 ret = -EFAULT;
1335 goto free;
1336 }
1337
1338 t = xt_find_table_lock(net, AF_INET, name);
1339 if (IS_ERR_OR_NULL(t)) {
1340 ret = t ? PTR_ERR(t) : -ENOENT;
1341 goto free;
1342 }
1343
1344 local_bh_disable();
1345 private = t->private;
1346 if (private->number != num_counters) {
1347 ret = -EINVAL;
1348 goto unlock_up_free;
1349 }
1350
1351 i = 0;
1352 /* Choose the copy that is on our node */
1353 curcpu = smp_processor_id();
1354 loc_cpu_entry = private->entries[curcpu];
1355 addend = xt_write_recseq_begin();
1356 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1357 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1358 ++i;
1359 }
1360 xt_write_recseq_end(addend);
1361 unlock_up_free:
1362 local_bh_enable();
1363 xt_table_unlock(t);
1364 module_put(t->me);
1365 free:
1366 vfree(paddc);
1367
1368 return ret;
1369 }
1370
1371 #ifdef CONFIG_COMPAT
1372 struct compat_ipt_replace {
1373 char name[XT_TABLE_MAXNAMELEN];
1374 u32 valid_hooks;
1375 u32 num_entries;
1376 u32 size;
1377 u32 hook_entry[NF_INET_NUMHOOKS];
1378 u32 underflow[NF_INET_NUMHOOKS];
1379 u32 num_counters;
1380 compat_uptr_t counters; /* struct xt_counters * */
1381 struct compat_ipt_entry entries[0];
1382 };
1383
1384 static int
1385 compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1386 unsigned int *size, struct xt_counters *counters,
1387 unsigned int i)
1388 {
1389 struct xt_entry_target *t;
1390 struct compat_ipt_entry __user *ce;
1391 u_int16_t target_offset, next_offset;
1392 compat_uint_t origsize;
1393 const struct xt_entry_match *ematch;
1394 int ret = 0;
1395
1396 origsize = *size;
1397 ce = (struct compat_ipt_entry __user *)*dstptr;
1398 if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
1399 copy_to_user(&ce->counters, &counters[i],
1400 sizeof(counters[i])) != 0)
1401 return -EFAULT;
1402
1403 *dstptr += sizeof(struct compat_ipt_entry);
1404 *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1405
1406 xt_ematch_foreach(ematch, e) {
1407 ret = xt_compat_match_to_user(ematch, dstptr, size);
1408 if (ret != 0)
1409 return ret;
1410 }
1411 target_offset = e->target_offset - (origsize - *size);
1412 t = ipt_get_target(e);
1413 ret = xt_compat_target_to_user(t, dstptr, size);
1414 if (ret)
1415 return ret;
1416 next_offset = e->next_offset - (origsize - *size);
1417 if (put_user(target_offset, &ce->target_offset) != 0 ||
1418 put_user(next_offset, &ce->next_offset) != 0)
1419 return -EFAULT;
1420 return 0;
1421 }
1422
1423 static int
1424 compat_find_calc_match(struct xt_entry_match *m,
1425 const char *name,
1426 const struct ipt_ip *ip,
1427 unsigned int hookmask,
1428 int *size)
1429 {
1430 struct xt_match *match;
1431
1432 match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
1433 m->u.user.revision);
1434 if (IS_ERR(match)) {
1435 duprintf("compat_check_calc_match: `%s' not found\n",
1436 m->u.user.name);
1437 return PTR_ERR(match);
1438 }
1439 m->u.kernel.match = match;
1440 *size += xt_compat_match_offset(match);
1441 return 0;
1442 }
1443
1444 static void compat_release_entry(struct compat_ipt_entry *e)
1445 {
1446 struct xt_entry_target *t;
1447 struct xt_entry_match *ematch;
1448
1449 /* Cleanup all matches */
1450 xt_ematch_foreach(ematch, e)
1451 module_put(ematch->u.kernel.match->me);
1452 t = compat_ipt_get_target(e);
1453 module_put(t->u.kernel.target->me);
1454 }
1455
1456 static int
1457 check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1458 struct xt_table_info *newinfo,
1459 unsigned int *size,
1460 const unsigned char *base,
1461 const unsigned char *limit,
1462 const unsigned int *hook_entries,
1463 const unsigned int *underflows,
1464 const char *name)
1465 {
1466 struct xt_entry_match *ematch;
1467 struct xt_entry_target *t;
1468 struct xt_target *target;
1469 unsigned int entry_offset;
1470 unsigned int j;
1471 int ret, off, h;
1472
1473 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1474 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
1475 (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
1476 (unsigned char *)e + e->next_offset > limit) {
1477 duprintf("Bad offset %p, limit = %p\n", e, limit);
1478 return -EINVAL;
1479 }
1480
1481 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1482 sizeof(struct compat_xt_entry_target)) {
1483 duprintf("checking: element %p size %u\n",
1484 e, e->next_offset);
1485 return -EINVAL;
1486 }
1487
1488 if (!ip_checkentry(&e->ip))
1489 return -EINVAL;
1490
1491 ret = xt_compat_check_entry_offsets(e,
1492 e->target_offset, e->next_offset);
1493 if (ret)
1494 return ret;
1495
1496 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1497 entry_offset = (void *)e - (void *)base;
1498 j = 0;
1499 xt_ematch_foreach(ematch, e) {
1500 ret = compat_find_calc_match(ematch, name,
1501 &e->ip, e->comefrom, &off);
1502 if (ret != 0)
1503 goto release_matches;
1504 ++j;
1505 }
1506
1507 t = compat_ipt_get_target(e);
1508 target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
1509 t->u.user.revision);
1510 if (IS_ERR(target)) {
1511 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1512 t->u.user.name);
1513 ret = PTR_ERR(target);
1514 goto release_matches;
1515 }
1516 t->u.kernel.target = target;
1517
1518 off += xt_compat_target_offset(target);
1519 *size += off;
1520 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
1521 if (ret)
1522 goto out;
1523
1524 /* Check hooks & underflows */
1525 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1526 if ((unsigned char *)e - base == hook_entries[h])
1527 newinfo->hook_entry[h] = hook_entries[h];
1528 if ((unsigned char *)e - base == underflows[h])
1529 newinfo->underflow[h] = underflows[h];
1530 }
1531
1532 /* Clear counters and comefrom */
1533 memset(&e->counters, 0, sizeof(e->counters));
1534 e->comefrom = 0;
1535 return 0;
1536
1537 out:
1538 module_put(t->u.kernel.target->me);
1539 release_matches:
1540 xt_ematch_foreach(ematch, e) {
1541 if (j-- == 0)
1542 break;
1543 module_put(ematch->u.kernel.match->me);
1544 }
1545 return ret;
1546 }
1547
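/* Expand one compat entry (matches, then target) into the larger
 * native layout at *dstptr, fixing up its offsets and the hook
 * entries/underflows that follow it. */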
1548 static int
1549 compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1550 unsigned int *size, const char *name,
1551 struct xt_table_info *newinfo, unsigned char *base)
1552 {
1553 struct xt_entry_target *t;
1554 struct xt_target *target;
1555 struct ipt_entry *de;
1556 unsigned int origsize;
1557 int ret, h;
1558 struct xt_entry_match *ematch;
1559
1560 ret = 0;
1561 origsize = *size;
1562 de = (struct ipt_entry *)*dstptr;
1563 memcpy(de, e, sizeof(struct ipt_entry));
1564 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1565
1566 *dstptr += sizeof(struct ipt_entry);
1567 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1568
1569 xt_ematch_foreach(ematch, e) {
1570 ret = xt_compat_match_from_user(ematch, dstptr, size);
1571 if (ret != 0)
1572 return ret;
1573 }
1574 de->target_offset = e->target_offset - (origsize - *size);
1575 t = compat_ipt_get_target(e);
1576 target = t->u.kernel.target;
1577 xt_compat_target_from_user(t, dstptr, size);
1578
1579 de->next_offset = e->next_offset - (origsize - *size);
1580 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1581 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1582 newinfo->hook_entry[h] -= origsize - *size;
1583 if ((unsigned char *)de - base < newinfo->underflow[h])
1584 newinfo->underflow[h] -= origsize - *size;
1585 }
1586 return ret;
1587 }
1588
1589 static int
1590 compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
1591 {
1592 struct xt_entry_match *ematch;
1593 struct xt_mtchk_param mtpar;
1594 unsigned int j;
1595 int ret = 0;
1596
1597 j = 0;
1598 mtpar.net = net;
1599 mtpar.table = name;
1600 mtpar.entryinfo = &e->ip;
1601 mtpar.hook_mask = e->comefrom;
1602 mtpar.family = NFPROTO_IPV4;
1603 xt_ematch_foreach(ematch, e) {
1604 ret = check_match(ematch, &mtpar);
1605 if (ret != 0)
1606 goto cleanup_matches;
1607 ++j;
1608 }
1609
1610 ret = check_target(e, net, name);
1611 if (ret)
1612 goto cleanup_matches;
1613 return 0;
1614
1615 cleanup_matches:
1616 xt_ematch_foreach(ematch, e) {
1617 if (j-- == 0)
1618 break;
1619 cleanup_match(ematch, net);
1620 }
1621 return ret;
1622 }
1623
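/* Translate a 32-bit (compat) ruleset into the native layout: the
 * first pass validates each compat entry and records size deltas, the
 * second copies everything into a newly allocated native table and
 * re-runs the normal checks. */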
1624 static int
1625 translate_compat_table(struct net *net,
1626 const char *name,
1627 unsigned int valid_hooks,
1628 struct xt_table_info **pinfo,
1629 void **pentry0,
1630 unsigned int total_size,
1631 unsigned int number,
1632 unsigned int *hook_entries,
1633 unsigned int *underflows)
1634 {
1635 unsigned int i, j;
1636 struct xt_table_info *newinfo, *info;
1637 void *pos, *entry0, *entry1;
1638 struct compat_ipt_entry *iter0;
1639 struct ipt_entry *iter1;
1640 unsigned int size;
1641 int ret;
1642
1643 info = *pinfo;
1644 entry0 = *pentry0;
1645 size = total_size;
1646 info->number = number;
1647
1648 /* Init all hooks to impossible value. */
1649 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1650 info->hook_entry[i] = 0xFFFFFFFF;
1651 info->underflow[i] = 0xFFFFFFFF;
1652 }
1653
1654 duprintf("translate_compat_table: size %u\n", info->size);
1655 j = 0;
1656 xt_compat_lock(AF_INET);
1657 xt_compat_init_offsets(AF_INET, number);
1658 /* Walk through entries, checking offsets. */
1659 xt_entry_foreach(iter0, entry0, total_size) {
1660 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1661 entry0,
1662 entry0 + total_size,
1663 hook_entries,
1664 underflows,
1665 name);
1666 if (ret != 0)
1667 goto out_unlock;
1668 ++j;
1669 }
1670
1671 ret = -EINVAL;
1672 if (j != number) {
1673 duprintf("translate_compat_table: %u not %u entries\n",
1674 j, number);
1675 goto out_unlock;
1676 }
1677
1678 /* Check hooks all assigned */
1679 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1680 /* Only hooks which are valid */
1681 if (!(valid_hooks & (1 << i)))
1682 continue;
1683 if (info->hook_entry[i] == 0xFFFFFFFF) {
1684 duprintf("Invalid hook entry %u %u\n",
1685 i, hook_entries[i]);
1686 goto out_unlock;
1687 }
1688 if (info->underflow[i] == 0xFFFFFFFF) {
1689 duprintf("Invalid underflow %u %u\n",
1690 i, underflows[i]);
1691 goto out_unlock;
1692 }
1693 }
1694
1695 ret = -ENOMEM;
1696 newinfo = xt_alloc_table_info(size);
1697 if (!newinfo)
1698 goto out_unlock;
1699
1700 newinfo->number = number;
1701 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1702 newinfo->hook_entry[i] = info->hook_entry[i];
1703 newinfo->underflow[i] = info->underflow[i];
1704 }
1705 entry1 = newinfo->entries[raw_smp_processor_id()];
1706 pos = entry1;
1707 size = total_size;
1708 xt_entry_foreach(iter0, entry0, total_size) {
1709 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1710 name, newinfo, entry1);
1711 if (ret != 0)
1712 break;
1713 }
1714 xt_compat_flush_offsets(AF_INET);
1715 xt_compat_unlock(AF_INET);
1716 if (ret)
1717 goto free_newinfo;
1718
1719 ret = -ELOOP;
1720 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1721 goto free_newinfo;
1722
1723 i = 0;
1724 xt_entry_foreach(iter1, entry1, newinfo->size) {
1725 ret = compat_check_entry(iter1, net, name);
1726 if (ret != 0)
1727 break;
1728 ++i;
1729 if (strcmp(ipt_get_target(iter1)->u.user.name,
1730 XT_ERROR_TARGET) == 0)
1731 ++newinfo->stacksize;
1732 }
1733 if (ret) {
1734 /*
1735 * The first i entries need cleanup_entry (calls ->destroy)
1736 * because their ->check was already called. The other j-i
1737 * entries need only release.
1738 */
1739 int skip = i;
1740 j -= i;
1741 xt_entry_foreach(iter0, entry0, newinfo->size) {
1742 if (skip-- > 0)
1743 continue;
1744 if (j-- == 0)
1745 break;
1746 compat_release_entry(iter0);
1747 }
1748 xt_entry_foreach(iter1, entry1, newinfo->size) {
1749 if (i-- == 0)
1750 break;
1751 cleanup_entry(iter1, net);
1752 }
1753 xt_free_table_info(newinfo);
1754 return ret;
1755 }
1756
1757 /* And one copy for every other CPU */
1758 for_each_possible_cpu(i)
1759 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1760 memcpy(newinfo->entries[i], entry1, newinfo->size);
1761
1762 *pinfo = newinfo;
1763 *pentry0 = entry1;
1764 xt_free_table_info(info);
1765 return 0;
1766
1767 free_newinfo:
1768 xt_free_table_info(newinfo);
1769 out:
1770 xt_entry_foreach(iter0, entry0, total_size) {
1771 if (j-- == 0)
1772 break;
1773 compat_release_entry(iter0);
1774 }
1775 return ret;
1776 out_unlock:
1777 xt_compat_flush_offsets(AF_INET);
1778 xt_compat_unlock(AF_INET);
1779 goto out;
1780 }
1781
1782 static int
1783 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1784 {
1785 int ret;
1786 struct compat_ipt_replace tmp;
1787 struct xt_table_info *newinfo;
1788 void *loc_cpu_entry;
1789 struct ipt_entry *iter;
1790
1791 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1792 return -EFAULT;
1793
1794 /* overflow check */
1795 if (tmp.size >= INT_MAX / num_possible_cpus())
1796 return -ENOMEM;
1797 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1798 return -ENOMEM;
1799 tmp.name[sizeof(tmp.name)-1] = 0;
1800
1801 newinfo = xt_alloc_table_info(tmp.size);
1802 if (!newinfo)
1803 return -ENOMEM;
1804
1805 /* choose the copy that is on our node/cpu */
1806 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1807 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1808 tmp.size) != 0) {
1809 ret = -EFAULT;
1810 goto free_newinfo;
1811 }
1812
1813 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1814 &newinfo, &loc_cpu_entry, tmp.size,
1815 tmp.num_entries, tmp.hook_entry,
1816 tmp.underflow);
1817 if (ret != 0)
1818 goto free_newinfo;
1819
1820 duprintf("compat_do_replace: Translated table\n");
1821
1822 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1823 tmp.num_counters, compat_ptr(tmp.counters));
1824 if (ret)
1825 goto free_newinfo_untrans;
1826 return 0;
1827
1828 free_newinfo_untrans:
1829 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1830 cleanup_entry(iter, net);
1831 free_newinfo:
1832 xt_free_table_info(newinfo);
1833 return ret;
1834 }
1835
1836 static int
1837 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1838 unsigned int len)
1839 {
1840 int ret;
1841
1842 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1843 return -EPERM;
1844
1845 switch (cmd) {
1846 case IPT_SO_SET_REPLACE:
1847 ret = compat_do_replace(sock_net(sk), user, len);
1848 break;
1849
1850 case IPT_SO_SET_ADD_COUNTERS:
1851 ret = do_add_counters(sock_net(sk), user, len, 1);
1852 break;
1853
1854 default:
1855 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1856 ret = -EINVAL;
1857 }
1858
1859 return ret;
1860 }
1861
1862 struct compat_ipt_get_entries {
1863 char name[XT_TABLE_MAXNAMELEN];
1864 compat_uint_t size;
1865 struct compat_ipt_entry entrytable[0];
1866 };
1867
1868 static int
1869 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1870 void __user *userptr)
1871 {
1872 struct xt_counters *counters;
1873 const struct xt_table_info *private = table->private;
1874 void __user *pos;
1875 unsigned int size;
1876 int ret = 0;
1877 const void *loc_cpu_entry;
1878 unsigned int i = 0;
1879 struct ipt_entry *iter;
1880
1881 counters = alloc_counters(table);
1882 if (IS_ERR(counters))
1883 return PTR_ERR(counters);
1884
1885 /* choose the copy that is on our node/cpu, ...
1886 * This choice is lazy (because the current thread is
1887 * allowed to migrate to another cpu)
1888 */
1889 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1890 pos = userptr;
1891 size = total_size;
1892 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1893 ret = compat_copy_entry_to_user(iter, &pos,
1894 &size, counters, i++);
1895 if (ret != 0)
1896 break;
1897 }
1898
1899 vfree(counters);
1900 return ret;
1901 }
1902
1903 static int
1904 compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
1905 int *len)
1906 {
1907 int ret;
1908 struct compat_ipt_get_entries get;
1909 struct xt_table *t;
1910
1911 if (*len < sizeof(get)) {
1912 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1913 return -EINVAL;
1914 }
1915
1916 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1917 return -EFAULT;
1918
1919 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1920 duprintf("compat_get_entries: %u != %zu\n",
1921 *len, sizeof(get) + get.size);
1922 return -EINVAL;
1923 }
1924
1925 xt_compat_lock(AF_INET);
1926 t = xt_find_table_lock(net, AF_INET, get.name);
1927 if (!IS_ERR_OR_NULL(t)) {
1928 const struct xt_table_info *private = t->private;
1929 struct xt_table_info info;
1930 duprintf("t->private->number = %u\n", private->number);
1931 ret = compat_table_info(private, &info);
1932 if (!ret && get.size == info.size) {
1933 ret = compat_copy_entries_to_user(private->size,
1934 t, uptr->entrytable);
1935 } else if (!ret) {
1936 duprintf("compat_get_entries: I've got %u not %u!\n",
1937 private->size, get.size);
1938 ret = -EAGAIN;
1939 }
1940 xt_compat_flush_offsets(AF_INET);
1941 module_put(t->me);
1942 xt_table_unlock(t);
1943 } else
1944 ret = t ? PTR_ERR(t) : -ENOENT;
1945
1946 xt_compat_unlock(AF_INET);
1947 return ret;
1948 }
1949
1950 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
1951
1952 static int
1953 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1954 {
1955 int ret;
1956
1957 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1958 return -EPERM;
1959
1960 switch (cmd) {
1961 case IPT_SO_GET_INFO:
1962 ret = get_info(sock_net(sk), user, len, 1);
1963 break;
1964 case IPT_SO_GET_ENTRIES:
1965 ret = compat_get_entries(sock_net(sk), user, len);
1966 break;
1967 default:
1968 ret = do_ipt_get_ctl(sk, cmd, user, len);
1969 }
1970 return ret;
1971 }
1972 #endif
1973
1974 static int
1975 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1976 {
1977 int ret;
1978
1979 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1980 return -EPERM;
1981
1982 switch (cmd) {
1983 case IPT_SO_SET_REPLACE:
1984 ret = do_replace(sock_net(sk), user, len);
1985 break;
1986
1987 case IPT_SO_SET_ADD_COUNTERS:
1988 ret = do_add_counters(sock_net(sk), user, len, 0);
1989 break;
1990
1991 default:
1992 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1993 ret = -EINVAL;
1994 }
1995
1996 return ret;
1997 }
1998
1999 static int
2000 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2001 {
2002 int ret;
2003
2004 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2005 return -EPERM;
2006
2007 switch (cmd) {
2008 case IPT_SO_GET_INFO:
2009 ret = get_info(sock_net(sk), user, len, 0);
2010 break;
2011
2012 case IPT_SO_GET_ENTRIES:
2013 ret = get_entries(sock_net(sk), user, len);
2014 break;
2015
2016 case IPT_SO_GET_REVISION_MATCH:
2017 case IPT_SO_GET_REVISION_TARGET: {
2018 struct xt_get_revision rev;
2019 int target;
2020
2021 if (*len != sizeof(rev)) {
2022 ret = -EINVAL;
2023 break;
2024 }
2025 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2026 ret = -EFAULT;
2027 break;
2028 }
2029 rev.name[sizeof(rev.name)-1] = 0;
2030
2031 if (cmd == IPT_SO_GET_REVISION_TARGET)
2032 target = 1;
2033 else
2034 target = 0;
2035
2036 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2037 rev.revision,
2038 target, &ret),
2039 "ipt_%s", rev.name);
2040 break;
2041 }
2042
2043 default:
2044 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
2045 ret = -EINVAL;
2046 }
2047
2048 return ret;
2049 }
2050
2051 struct xt_table *ipt_register_table(struct net *net,
2052 const struct xt_table *table,
2053 const struct ipt_replace *repl)
2054 {
2055 int ret;
2056 struct xt_table_info *newinfo;
2057 struct xt_table_info bootstrap = {0};
2058 void *loc_cpu_entry;
2059 struct xt_table *new_table;
2060
2061 newinfo = xt_alloc_table_info(repl->size);
2062 if (!newinfo) {
2063 ret = -ENOMEM;
2064 goto out;
2065 }
2066
2067 /* choose the copy on our node/cpu, but don't care about preemption */
2068 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2069 memcpy(loc_cpu_entry, repl->entries, repl->size);
2070
2071 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2072 if (ret != 0)
2073 goto out_free;
2074
2075 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2076 if (IS_ERR(new_table)) {
2077 ret = PTR_ERR(new_table);
2078 goto out_free;
2079 }
2080
2081 return new_table;
2082
2083 out_free:
2084 xt_free_table_info(newinfo);
2085 out:
2086 return ERR_PTR(ret);
2087 }
2088
2089 void ipt_unregister_table(struct net *net, struct xt_table *table)
2090 {
2091 struct xt_table_info *private;
2092 void *loc_cpu_entry;
2093 struct module *table_owner = table->me;
2094 struct ipt_entry *iter;
2095
2096 private = xt_unregister_table(table);
2097
2098 /* Decrease module usage counts and free resources */
2099 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2100 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2101 cleanup_entry(iter, net);
2102 if (private->number > private->initial_entries)
2103 module_put(table_owner);
2104 xt_free_table_info(private);
2105 }
2106
2107 /* Returns true if the type and code are matched by the range, false otherwise */
2108 static inline bool
2109 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2110 u_int8_t type, u_int8_t code,
2111 bool invert)
2112 {
2113 return ((test_type == 0xFF) ||
2114 (type == test_type && code >= min_code && code <= max_code))
2115 ^ invert;
2116 }
2117
2118 static bool
2119 icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
2120 {
2121 const struct icmphdr *ic;
2122 struct icmphdr _icmph;
2123 const struct ipt_icmp *icmpinfo = par->matchinfo;
2124
2125 /* Must not be a fragment. */
2126 if (par->fragoff != 0)
2127 return false;
2128
2129 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2130 if (ic == NULL) {
2131 /* We've been asked to examine this packet, and we
2132 * can't. Hence, no choice but to drop.
2133 */
2134 duprintf("Dropping evil ICMP tinygram.\n");
2135 par->hotdrop = true;
2136 return false;
2137 }
2138
2139 return icmp_type_code_match(icmpinfo->type,
2140 icmpinfo->code[0],
2141 icmpinfo->code[1],
2142 ic->type, ic->code,
2143 !!(icmpinfo->invflags&IPT_ICMP_INV));
2144 }
2145
2146 static int icmp_checkentry(const struct xt_mtchk_param *par)
2147 {
2148 const struct ipt_icmp *icmpinfo = par->matchinfo;
2149
2150 /* Must specify no unknown invflags */
2151 return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
2152 }
2153
2154 static struct xt_target ipt_builtin_tg[] __read_mostly = {
2155 {
2156 .name = XT_STANDARD_TARGET,
2157 .targetsize = sizeof(int),
2158 .family = NFPROTO_IPV4,
2159 #ifdef CONFIG_COMPAT
2160 .compatsize = sizeof(compat_int_t),
2161 .compat_from_user = compat_standard_from_user,
2162 .compat_to_user = compat_standard_to_user,
2163 #endif
2164 },
2165 {
2166 .name = XT_ERROR_TARGET,
2167 .target = ipt_error,
2168 .targetsize = XT_FUNCTION_MAXNAMELEN,
2169 .family = NFPROTO_IPV4,
2170 },
2171 };
2172
2173 static struct nf_sockopt_ops ipt_sockopts = {
2174 .pf = PF_INET,
2175 .set_optmin = IPT_BASE_CTL,
2176 .set_optmax = IPT_SO_SET_MAX+1,
2177 .set = do_ipt_set_ctl,
2178 #ifdef CONFIG_COMPAT
2179 .compat_set = compat_do_ipt_set_ctl,
2180 #endif
2181 .get_optmin = IPT_BASE_CTL,
2182 .get_optmax = IPT_SO_GET_MAX+1,
2183 .get = do_ipt_get_ctl,
2184 #ifdef CONFIG_COMPAT
2185 .compat_get = compat_do_ipt_get_ctl,
2186 #endif
2187 .owner = THIS_MODULE,
2188 };
2189
2190 static struct xt_match ipt_builtin_mt[] __read_mostly = {
2191 {
2192 .name = "icmp",
2193 .match = icmp_match,
2194 .matchsize = sizeof(struct ipt_icmp),
2195 .checkentry = icmp_checkentry,
2196 .proto = IPPROTO_ICMP,
2197 .family = NFPROTO_IPV4,
2198 },
2199 };
2200
2201 static int __net_init ip_tables_net_init(struct net *net)
2202 {
2203 return xt_proto_init(net, NFPROTO_IPV4);
2204 }
2205
2206 static void __net_exit ip_tables_net_exit(struct net *net)
2207 {
2208 xt_proto_fini(net, NFPROTO_IPV4);
2209 }
2210
2211 static struct pernet_operations ip_tables_net_ops = {
2212 .init = ip_tables_net_init,
2213 .exit = ip_tables_net_exit,
2214 };
2215
2216 static int __init ip_tables_init(void)
2217 {
2218 int ret;
2219
2220 ret = register_pernet_subsys(&ip_tables_net_ops);
2221 if (ret < 0)
2222 goto err1;
2223
2224 /* No one else will be downing sem now, so we won't sleep */
2225 ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
2226 if (ret < 0)
2227 goto err2;
2228 ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
2229 if (ret < 0)
2230 goto err4;
2231
2232 /* Register setsockopt */
2233 ret = nf_register_sockopt(&ipt_sockopts);
2234 if (ret < 0)
2235 goto err5;
2236
2237 pr_info("(C) 2000-2006 Netfilter Core Team\n");
2238 return 0;
2239
2240 err5:
2241 xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
2242 err4:
2243 xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
2244 err2:
2245 unregister_pernet_subsys(&ip_tables_net_ops);
2246 err1:
2247 return ret;
2248 }
2249
2250 static void __exit ip_tables_fini(void)
2251 {
2252 nf_unregister_sockopt(&ipt_sockopts);
2253
2254 xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
2255 xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
2256 unregister_pernet_subsys(&ip_tables_net_ops);
2257 }
2258
2259 EXPORT_SYMBOL(ipt_register_table);
2260 EXPORT_SYMBOL(ipt_unregister_table);
2261 EXPORT_SYMBOL(ipt_do_table);
2262 module_init(ip_tables_init);
2263 module_exit(ip_tables_fini);