net/ipv4/route.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * ROUTE - implementation of the IP router.
7 *
8 * Authors: Ross Biro
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13 *
14 * Fixes:
15 * Alan Cox : Verify area fixes.
16 * Alan Cox : cli() protects routing changes
17 * Rui Oliveira : ICMP routing table updates
18 * (rco@di.uminho.pt) Routing table insertion and update
19 * Linus Torvalds : Rewrote bits to be sensible
20 * Alan Cox : Added BSD route gw semantics
21 * Alan Cox : Super /proc >4K
22 * Alan Cox : MTU in route table
23 * Alan Cox : MSS actually. Also added the window
24 * clamper.
25 * Sam Lantinga : Fixed route matching in rt_del()
26 * Alan Cox : Routing cache support.
27 * Alan Cox : Removed compatibility cruft.
28 * Alan Cox : RTF_REJECT support.
29 * Alan Cox : TCP irtt support.
30 * Jonathan Naylor : Added Metric support.
31 * Miquel van Smoorenburg : BSD API fixes.
32 * Miquel van Smoorenburg : Metrics.
33 * Alan Cox : Use __u32 properly
34 * Alan Cox : Aligned routing errors more closely with BSD
35 * our system is still very different.
36 * Alan Cox : Faster /proc handling
37 * Alexey Kuznetsov : Massive rework to support tree based routing,
38 * routing caches and better behaviour.
39 *
40 * Olaf Erb : irtt wasn't being copied right.
41 * Bjorn Ekwall : Kerneld route support.
42 * Alan Cox : Multicast fixed (I hope)
43 * Pavel Krauz : Limited broadcast fixed
44 * Mike McLagan : Routing by source
45 * Alexey Kuznetsov : End of old history. Split to fib.c and
46 * route.c and rewritten from scratch.
47 * Andi Kleen : Load-limit warning messages.
48 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
49 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
50 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
51 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
52 * Marc Boucher : routing by fwmark
53 * Robert Olsson : Added rt_cache statistics
54 * Arnaldo C. Melo : Convert proc stuff to seq_file
55 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
56 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
57 * Ilia Sotnikov : Removed TOS from hash calculations
58 *
59 * This program is free software; you can redistribute it and/or
60 * modify it under the terms of the GNU General Public License
61 * as published by the Free Software Foundation; either version
62 * 2 of the License, or (at your option) any later version.
63 */
64
65 #define pr_fmt(fmt) "IPv4: " fmt
66
67 #include <linux/module.h>
68 #include <asm/uaccess.h>
69 #include <linux/bitops.h>
70 #include <linux/types.h>
71 #include <linux/kernel.h>
72 #include <linux/mm.h>
73 #include <linux/string.h>
74 #include <linux/socket.h>
75 #include <linux/sockios.h>
76 #include <linux/errno.h>
77 #include <linux/in.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/proc_fs.h>
81 #include <linux/init.h>
82 #include <linux/skbuff.h>
83 #include <linux/inetdevice.h>
84 #include <linux/igmp.h>
85 #include <linux/pkt_sched.h>
86 #include <linux/mroute.h>
87 #include <linux/netfilter_ipv4.h>
88 #include <linux/random.h>
89 #include <linux/rcupdate.h>
90 #include <linux/times.h>
91 #include <linux/slab.h>
92 #include <linux/jhash.h>
93 #include <net/dst.h>
94 #include <net/net_namespace.h>
95 #include <net/protocol.h>
96 #include <net/ip.h>
97 #include <net/route.h>
98 #include <net/inetpeer.h>
99 #include <net/sock.h>
100 #include <net/ip_fib.h>
101 #include <net/arp.h>
102 #include <net/tcp.h>
103 #include <net/icmp.h>
104 #include <net/xfrm.h>
105 #include <net/netevent.h>
106 #include <net/rtnetlink.h>
107 #ifdef CONFIG_SYSCTL
108 #include <linux/sysctl.h>
109 #include <linux/kmemleak.h>
110 #endif
111 #include <net/secure_seq.h>
112
113 #define RT_FL_TOS(oldflp4) \
114 ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
115
116 #define IP_MAX_MTU 0xFFF0
117
118 #define RT_GC_TIMEOUT (300*HZ)
119
120 static int ip_rt_max_size;
121 static int ip_rt_redirect_number __read_mostly = 9;
122 static int ip_rt_redirect_load __read_mostly = HZ / 50;
123 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
124 static int ip_rt_error_cost __read_mostly = HZ;
125 static int ip_rt_error_burst __read_mostly = 5 * HZ;
126 static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
127 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
128 static int ip_rt_min_advmss __read_mostly = 256;
129
130 /*
131 * Interface to generic destination cache.
132 */
133
134 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
135 static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
136 static unsigned int ipv4_mtu(const struct dst_entry *dst);
137 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
138 static void ipv4_link_failure(struct sk_buff *skb);
139 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
140 struct sk_buff *skb, u32 mtu);
141 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
142 struct sk_buff *skb);
143 static void ipv4_dst_destroy(struct dst_entry *dst);
144
145 static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
146 int how)
147 {
148 }
149
150 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
151 {
152 WARN_ON(1);
153 return NULL;
154 }
155
156 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
157 struct sk_buff *skb,
158 const void *daddr);
159
160 static struct dst_ops ipv4_dst_ops = {
161 .family = AF_INET,
162 .protocol = cpu_to_be16(ETH_P_IP),
163 .check = ipv4_dst_check,
164 .default_advmss = ipv4_default_advmss,
165 .mtu = ipv4_mtu,
166 .cow_metrics = ipv4_cow_metrics,
167 .destroy = ipv4_dst_destroy,
168 .ifdown = ipv4_dst_ifdown,
169 .negative_advice = ipv4_negative_advice,
170 .link_failure = ipv4_link_failure,
171 .update_pmtu = ip_rt_update_pmtu,
172 .redirect = ip_do_redirect,
173 .local_out = __ip_local_out,
174 .neigh_lookup = ipv4_neigh_lookup,
175 };
176
177 #define ECN_OR_COST(class) TC_PRIO_##class
178
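/*
 * Maps the four TOS bits (IPTOS_TOS(tos) >> 1, as used by rt_tos2priority())
 * to pfifo_fast priority bands.  The odd ECN_OR_COST() entries historically
 * covered the "low cost" bit and here simply alias the same TC_PRIO_* class.
 */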
179 const __u8 ip_tos2prio[16] = {
180 TC_PRIO_BESTEFFORT,
181 ECN_OR_COST(BESTEFFORT),
182 TC_PRIO_BESTEFFORT,
183 ECN_OR_COST(BESTEFFORT),
184 TC_PRIO_BULK,
185 ECN_OR_COST(BULK),
186 TC_PRIO_BULK,
187 ECN_OR_COST(BULK),
188 TC_PRIO_INTERACTIVE,
189 ECN_OR_COST(INTERACTIVE),
190 TC_PRIO_INTERACTIVE,
191 ECN_OR_COST(INTERACTIVE),
192 TC_PRIO_INTERACTIVE_BULK,
193 ECN_OR_COST(INTERACTIVE_BULK),
194 TC_PRIO_INTERACTIVE_BULK,
195 ECN_OR_COST(INTERACTIVE_BULK)
196 };
197 EXPORT_SYMBOL(ip_tos2prio);
198
199 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
200 #define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
201
202 #ifdef CONFIG_PROC_FS
203 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
204 {
205 if (*pos)
206 return NULL;
207 return SEQ_START_TOKEN;
208 }
209
210 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
211 {
212 ++*pos;
213 return NULL;
214 }
215
216 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
217 {
218 }
219
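/*
 * The IPv4 routing cache is gone; this seq_file now emits only the historical
 * column header, presumably kept so that tools which read /proc/net/rt_cache
 * do not break.
 */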
220 static int rt_cache_seq_show(struct seq_file *seq, void *v)
221 {
222 if (v == SEQ_START_TOKEN)
223 seq_printf(seq, "%-127s\n",
224 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
225 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
226 "HHUptod\tSpecDst");
227 return 0;
228 }
229
230 static const struct seq_operations rt_cache_seq_ops = {
231 .start = rt_cache_seq_start,
232 .next = rt_cache_seq_next,
233 .stop = rt_cache_seq_stop,
234 .show = rt_cache_seq_show,
235 };
236
237 static int rt_cache_seq_open(struct inode *inode, struct file *file)
238 {
239 return seq_open(file, &rt_cache_seq_ops);
240 }
241
242 static const struct file_operations rt_cache_seq_fops = {
243 .owner = THIS_MODULE,
244 .open = rt_cache_seq_open,
245 .read = seq_read,
246 .llseek = seq_lseek,
247 .release = seq_release,
248 };
249
250
251 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
252 {
253 int cpu;
254
255 if (*pos == 0)
256 return SEQ_START_TOKEN;
257
258 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
259 if (!cpu_possible(cpu))
260 continue;
261 *pos = cpu+1;
262 return &per_cpu(rt_cache_stat, cpu);
263 }
264 return NULL;
265 }
266
267 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
268 {
269 int cpu;
270
271 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
272 if (!cpu_possible(cpu))
273 continue;
274 *pos = cpu+1;
275 return &per_cpu(rt_cache_stat, cpu);
276 }
277 return NULL;
278
279 }
280
281 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
282 {
283
284 }
285
286 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
287 {
288 struct rt_cache_stat *st = v;
289
290 if (v == SEQ_START_TOKEN) {
291 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
292 return 0;
293 }
294
295 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
296 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
297 dst_entries_get_slow(&ipv4_dst_ops),
298 st->in_hit,
299 st->in_slow_tot,
300 st->in_slow_mc,
301 st->in_no_route,
302 st->in_brd,
303 st->in_martian_dst,
304 st->in_martian_src,
305
306 st->out_hit,
307 st->out_slow_tot,
308 st->out_slow_mc,
309
310 st->gc_total,
311 st->gc_ignored,
312 st->gc_goal_miss,
313 st->gc_dst_overflow,
314 st->in_hlist_search,
315 st->out_hlist_search
316 );
317 return 0;
318 }
319
320 static const struct seq_operations rt_cpu_seq_ops = {
321 .start = rt_cpu_seq_start,
322 .next = rt_cpu_seq_next,
323 .stop = rt_cpu_seq_stop,
324 .show = rt_cpu_seq_show,
325 };
326
327
328 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
329 {
330 return seq_open(file, &rt_cpu_seq_ops);
331 }
332
333 static const struct file_operations rt_cpu_seq_fops = {
334 .owner = THIS_MODULE,
335 .open = rt_cpu_seq_open,
336 .read = seq_read,
337 .llseek = seq_lseek,
338 .release = seq_release,
339 };
340
341 #ifdef CONFIG_IP_ROUTE_CLASSID
342 static int rt_acct_proc_show(struct seq_file *m, void *v)
343 {
344 struct ip_rt_acct *dst, *src;
345 unsigned int i, j;
346
347 dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
348 if (!dst)
349 return -ENOMEM;
350
351 for_each_possible_cpu(i) {
352 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
353 for (j = 0; j < 256; j++) {
354 dst[j].o_bytes += src[j].o_bytes;
355 dst[j].o_packets += src[j].o_packets;
356 dst[j].i_bytes += src[j].i_bytes;
357 dst[j].i_packets += src[j].i_packets;
358 }
359 }
360
361 seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
362 kfree(dst);
363 return 0;
364 }
365
366 static int rt_acct_proc_open(struct inode *inode, struct file *file)
367 {
368 return single_open(file, rt_acct_proc_show, NULL);
369 }
370
371 static const struct file_operations rt_acct_proc_fops = {
372 .owner = THIS_MODULE,
373 .open = rt_acct_proc_open,
374 .read = seq_read,
375 .llseek = seq_lseek,
376 .release = single_release,
377 };
378 #endif
379
380 static int __net_init ip_rt_do_proc_init(struct net *net)
381 {
382 struct proc_dir_entry *pde;
383
384 pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
385 &rt_cache_seq_fops);
386 if (!pde)
387 goto err1;
388
389 pde = proc_create("rt_cache", S_IRUGO,
390 net->proc_net_stat, &rt_cpu_seq_fops);
391 if (!pde)
392 goto err2;
393
394 #ifdef CONFIG_IP_ROUTE_CLASSID
395 pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
396 if (!pde)
397 goto err3;
398 #endif
399 return 0;
400
401 #ifdef CONFIG_IP_ROUTE_CLASSID
402 err3:
403 remove_proc_entry("rt_cache", net->proc_net_stat);
404 #endif
405 err2:
406 remove_proc_entry("rt_cache", net->proc_net);
407 err1:
408 return -ENOMEM;
409 }
410
411 static void __net_exit ip_rt_do_proc_exit(struct net *net)
412 {
413 remove_proc_entry("rt_cache", net->proc_net_stat);
414 remove_proc_entry("rt_cache", net->proc_net);
415 #ifdef CONFIG_IP_ROUTE_CLASSID
416 remove_proc_entry("rt_acct", net->proc_net);
417 #endif
418 }
419
420 static struct pernet_operations ip_rt_proc_ops __net_initdata = {
421 .init = ip_rt_do_proc_init,
422 .exit = ip_rt_do_proc_exit,
423 };
424
425 static int __init ip_rt_proc_init(void)
426 {
427 return register_pernet_subsys(&ip_rt_proc_ops);
428 }
429
430 #else
431 static inline int ip_rt_proc_init(void)
432 {
433 return 0;
434 }
435 #endif /* CONFIG_PROC_FS */
436
437 static inline bool rt_is_expired(const struct rtable *rth)
438 {
439 return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
440 }
441
442 void rt_cache_flush(struct net *net)
443 {
444 rt_genid_bump(net);
445 }
446
447 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
448 struct sk_buff *skb,
449 const void *daddr)
450 {
451 struct net_device *dev = dst->dev;
452 const __be32 *pkey = daddr;
453 const struct rtable *rt;
454 struct neighbour *n;
455
456 rt = (const struct rtable *) dst;
457 if (rt->rt_gateway)
458 pkey = (const __be32 *) &rt->rt_gateway;
459 else if (skb)
460 pkey = &ip_hdr(skb)->daddr;
461
462 n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
463 if (n)
464 return n;
465 return neigh_create(&arp_tbl, pkey, dev);
466 }
467
468 #define IP_IDENTS_SZ 2048u
469 struct ip_ident_bucket {
470 atomic_t id;
471 u32 stamp32;
472 };
473
474 static struct ip_ident_bucket *ip_idents __read_mostly;
475
476 /* In order to protect privacy, we add a perturbation to identifiers
477 * if one generator is seldom used. This makes it hard for an attacker
478 * to infer how many packets were sent between two points in time.
479 */
480 u32 ip_idents_reserve(u32 hash, int segs)
481 {
482 struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
483 u32 old = ACCESS_ONCE(bucket->stamp32);
484 u32 now = (u32)jiffies;
485 u32 delta = 0;
486
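/*
 * If this bucket has been idle since 'old', add a random offset:
 * prandom_u32() * (now - old) >> 32 is uniform in [0, now - old),
 * so an observer cannot count how many packets were sent in between.
 */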
487 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old) {
488 u64 x = prandom_u32();
489
490 x *= (now - old);
491 delta = (u32)(x >> 32);
492 }
493
494 return atomic_add_return(segs + delta, &bucket->id) - segs;
495 }
496 EXPORT_SYMBOL(ip_idents_reserve);
497
498 void __ip_select_ident(struct iphdr *iph, int segs)
499 {
500 static u32 ip_idents_hashrnd __read_mostly;
501 static bool hashrnd_initialized = false;
502 u32 hash, id;
503
504 if (unlikely(!hashrnd_initialized)) {
505 hashrnd_initialized = true;
506 get_random_bytes(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
507 }
508
509 hash = jhash_3words((__force u32)iph->daddr,
510 (__force u32)iph->saddr,
511 iph->protocol,
512 ip_idents_hashrnd);
513 id = ip_idents_reserve(hash, segs);
514 iph->id = htons(id);
515 }
516 EXPORT_SYMBOL(__ip_select_ident);
517
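/*
 * Build an IPv4 flow key from a packet header; if a socket is supplied,
 * its bound device, mark, TOS and protocol override the packet-derived
 * values so the lookup matches what the socket would have used on output.
 */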
518 static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
519 const struct iphdr *iph,
520 int oif, u8 tos,
521 u8 prot, u32 mark, int flow_flags)
522 {
523 if (sk) {
524 const struct inet_sock *inet = inet_sk(sk);
525
526 oif = sk->sk_bound_dev_if;
527 mark = sk->sk_mark;
528 tos = RT_CONN_FLAGS(sk);
529 prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
530 }
531 flowi4_init_output(fl4, oif, mark, tos,
532 RT_SCOPE_UNIVERSE, prot,
533 flow_flags,
534 iph->daddr, iph->saddr, 0, 0);
535 }
536
537 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
538 const struct sock *sk)
539 {
540 const struct iphdr *iph = ip_hdr(skb);
541 int oif = skb->dev->ifindex;
542 u8 tos = RT_TOS(iph->tos);
543 u8 prot = iph->protocol;
544 u32 mark = skb->mark;
545
546 __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
547 }
548
549 static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
550 {
551 const struct inet_sock *inet = inet_sk(sk);
552 const struct ip_options_rcu *inet_opt;
553 __be32 daddr = inet->inet_daddr;
554
555 rcu_read_lock();
556 inet_opt = rcu_dereference(inet->inet_opt);
557 if (inet_opt && inet_opt->opt.srr)
558 daddr = inet_opt->opt.faddr;
559 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
560 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
561 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
562 inet_sk_flowi_flags(sk),
563 daddr, inet->inet_saddr, 0, 0);
564 rcu_read_unlock();
565 }
566
567 static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
568 const struct sk_buff *skb)
569 {
570 if (skb)
571 build_skb_flow_key(fl4, skb, sk);
572 else
573 build_sk_flow_key(fl4, sk);
574 }
575
576 static inline void rt_free(struct rtable *rt)
577 {
578 call_rcu(&rt->dst.rcu_head, dst_rcu_free);
579 }
580
581 static DEFINE_SPINLOCK(fnhe_lock);
582
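/*
 * Recycle the least recently stamped exception in this hash bucket,
 * releasing any route cached on it first.  Called with fnhe_lock held.
 */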
583 static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
584 {
585 struct fib_nh_exception *fnhe, *oldest;
586 struct rtable *orig;
587
588 oldest = rcu_dereference(hash->chain);
589 for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
590 fnhe = rcu_dereference(fnhe->fnhe_next)) {
591 if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
592 oldest = fnhe;
593 }
594 orig = rcu_dereference(oldest->fnhe_rth);
595 if (orig) {
596 RCU_INIT_POINTER(oldest->fnhe_rth, NULL);
597 rt_free(orig);
598 }
599 return oldest;
600 }
601
602 static inline u32 fnhe_hashfun(__be32 daddr)
603 {
604 u32 hval;
605
606 hval = (__force u32) daddr;
607 hval ^= (hval >> 11) ^ (hval >> 22);
608
609 return hval & (FNHE_HASH_SIZE - 1);
610 }
611
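/*
 * Record per-destination state learned from ICMP (a redirect gateway and/or
 * a reduced PMTU with its expiry) against the FIB nexthop, creating the
 * exception entry and its hash table on demand.
 */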
612 static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
613 u32 pmtu, unsigned long expires)
614 {
615 struct fnhe_hash_bucket *hash;
616 struct fib_nh_exception *fnhe;
617 int depth;
618 u32 hval = fnhe_hashfun(daddr);
619
620 spin_lock_bh(&fnhe_lock);
621
622 hash = nh->nh_exceptions;
623 if (!hash) {
624 hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
625 if (!hash)
626 goto out_unlock;
627 nh->nh_exceptions = hash;
628 }
629
630 hash += hval;
631
632 depth = 0;
633 for (fnhe = rcu_dereference(hash->chain); fnhe;
634 fnhe = rcu_dereference(fnhe->fnhe_next)) {
635 if (fnhe->fnhe_daddr == daddr)
636 break;
637 depth++;
638 }
639
640 if (fnhe) {
641 if (gw)
642 fnhe->fnhe_gw = gw;
643 if (pmtu) {
644 fnhe->fnhe_pmtu = pmtu;
645 fnhe->fnhe_expires = expires;
646 }
647 } else {
648 if (depth > FNHE_RECLAIM_DEPTH)
649 fnhe = fnhe_oldest(hash);
650 else {
651 fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
652 if (!fnhe)
653 goto out_unlock;
654
655 fnhe->fnhe_next = hash->chain;
656 rcu_assign_pointer(hash->chain, fnhe);
657 }
658 fnhe->fnhe_daddr = daddr;
659 fnhe->fnhe_gw = gw;
660 fnhe->fnhe_pmtu = pmtu;
661 fnhe->fnhe_expires = expires;
662 }
663
664 fnhe->fnhe_stamp = jiffies;
665
666 out_unlock:
667 spin_unlock_bh(&fnhe_lock);
668 return;
669 }
670
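/*
 * Process an ICMP redirect: sanity-check the advertised gateway, make sure a
 * neighbour entry for it exists (or is being resolved), and if the route came
 * from the FIB, remember the new gateway as a nexthop exception.  kill_route
 * additionally marks the current dst obsolete so it gets looked up again.
 */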
671 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
672 bool kill_route)
673 {
674 __be32 new_gw = icmp_hdr(skb)->un.gateway;
675 __be32 old_gw = ip_hdr(skb)->saddr;
676 struct net_device *dev = skb->dev;
677 struct in_device *in_dev;
678 struct fib_result res;
679 struct neighbour *n;
680 struct net *net;
681
682 switch (icmp_hdr(skb)->code & 7) {
683 case ICMP_REDIR_NET:
684 case ICMP_REDIR_NETTOS:
685 case ICMP_REDIR_HOST:
686 case ICMP_REDIR_HOSTTOS:
687 break;
688
689 default:
690 return;
691 }
692
693 if (rt->rt_gateway != old_gw)
694 return;
695
696 in_dev = __in_dev_get_rcu(dev);
697 if (!in_dev)
698 return;
699
700 net = dev_net(dev);
701 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
702 ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
703 ipv4_is_zeronet(new_gw))
704 goto reject_redirect;
705
706 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
707 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
708 goto reject_redirect;
709 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
710 goto reject_redirect;
711 } else {
712 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
713 goto reject_redirect;
714 }
715
716 n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
717 if (!n)
718 n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
719 if (!IS_ERR(n)) {
720 if (!(n->nud_state & NUD_VALID)) {
721 neigh_event_send(n, NULL);
722 } else {
723 if (fib_lookup(net, fl4, &res) == 0) {
724 struct fib_nh *nh = &FIB_RES_NH(res);
725
726 update_or_create_fnhe(nh, fl4->daddr, new_gw,
727 0, 0);
728 }
729 if (kill_route)
730 rt->dst.obsolete = DST_OBSOLETE_KILL;
731 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
732 }
733 neigh_release(n);
734 }
735 return;
736
737 reject_redirect:
738 #ifdef CONFIG_IP_ROUTE_VERBOSE
739 if (IN_DEV_LOG_MARTIANS(in_dev)) {
740 const struct iphdr *iph = (const struct iphdr *) skb->data;
741 __be32 daddr = iph->daddr;
742 __be32 saddr = iph->saddr;
743
744 net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
745 " Advised path = %pI4 -> %pI4\n",
746 &old_gw, dev->name, &new_gw,
747 &saddr, &daddr);
748 }
749 #endif
750 ;
751 }
752
753 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
754 {
755 struct rtable *rt;
756 struct flowi4 fl4;
757 const struct iphdr *iph = (const struct iphdr *) skb->data;
758 int oif = skb->dev->ifindex;
759 u8 tos = RT_TOS(iph->tos);
760 u8 prot = iph->protocol;
761 u32 mark = skb->mark;
762
763 rt = (struct rtable *) dst;
764
765 __build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0);
766 __ip_do_redirect(rt, skb, &fl4, true);
767 }
768
769 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
770 {
771 struct rtable *rt = (struct rtable *)dst;
772 struct dst_entry *ret = dst;
773
774 if (rt) {
775 if (dst->obsolete > 0) {
776 ip_rt_put(rt);
777 ret = NULL;
778 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
779 rt->dst.expires) {
780 ip_rt_put(rt);
781 ret = NULL;
782 }
783 }
784 return ret;
785 }
786
787 /*
788 * Algorithm:
789 * 1. The first ip_rt_redirect_number redirects are sent
790 * with exponential backoff, then we stop sending them at all,
791 * assuming that the host ignores our redirects.
792 * 2. If we did not see packets requiring redirects
793 * during ip_rt_redirect_silence, we assume that the host
794 * forgot the redirected route and start sending redirects again.
795 *
796 * This algorithm is much cheaper and more intelligent than dumb load limiting
797 * in icmp.c.
798 *
799 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
800 * and "frag. need" (breaks PMTU discovery) in icmp.c.
801 */
802
803 void ip_rt_send_redirect(struct sk_buff *skb)
804 {
805 struct rtable *rt = skb_rtable(skb);
806 struct in_device *in_dev;
807 struct inet_peer *peer;
808 struct net *net;
809 int log_martians;
810
811 rcu_read_lock();
812 in_dev = __in_dev_get_rcu(rt->dst.dev);
813 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
814 rcu_read_unlock();
815 return;
816 }
817 log_martians = IN_DEV_LOG_MARTIANS(in_dev);
818 rcu_read_unlock();
819
820 net = dev_net(rt->dst.dev);
821 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
822 if (!peer) {
823 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
824 rt_nexthop(rt, ip_hdr(skb)->daddr));
825 return;
826 }
827
828 /* No redirected packets during ip_rt_redirect_silence;
829 * reset the algorithm.
830 */
831 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
832 peer->rate_tokens = 0;
833
834 /* Too many ignored redirects; do not send anything and
835 * record in peer->rate_last the time of the last packet that wanted a redirect.
836 */
837 if (peer->rate_tokens >= ip_rt_redirect_number) {
838 peer->rate_last = jiffies;
839 goto out_put_peer;
840 }
841
842 /* Check for load limit; set rate_last to the latest sent
843 * redirect.
844 */
845 if (peer->rate_tokens == 0 ||
846 time_after(jiffies,
847 (peer->rate_last +
848 (ip_rt_redirect_load << peer->rate_tokens)))) {
849 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
850
851 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
852 peer->rate_last = jiffies;
853 ++peer->rate_tokens;
854 #ifdef CONFIG_IP_ROUTE_VERBOSE
855 if (log_martians &&
856 peer->rate_tokens == ip_rt_redirect_number)
857 net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
858 &ip_hdr(skb)->saddr, inet_iif(skb),
859 &ip_hdr(skb)->daddr, &gw);
860 #endif
861 }
862 out_put_peer:
863 inet_putpeer(peer);
864 }
865
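/*
 * Input-path error handler: translate the dst error into an ICMP
 * destination-unreachable code and send it, rate-limited by a simple
 * token bucket kept in the source address's inet_peer entry.
 */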
866 static int ip_error(struct sk_buff *skb)
867 {
868 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
869 struct rtable *rt = skb_rtable(skb);
870 struct inet_peer *peer;
871 unsigned long now;
872 struct net *net;
873 bool send;
874 int code;
875
876 /* IP on this device is disabled. */
877 if (!in_dev)
878 goto out;
879
880 net = dev_net(rt->dst.dev);
881 if (!IN_DEV_FORWARD(in_dev)) {
882 switch (rt->dst.error) {
883 case EHOSTUNREACH:
884 IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
885 break;
886
887 case ENETUNREACH:
888 IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
889 break;
890 }
891 goto out;
892 }
893
894 switch (rt->dst.error) {
895 case EINVAL:
896 default:
897 goto out;
898 case EHOSTUNREACH:
899 code = ICMP_HOST_UNREACH;
900 break;
901 case ENETUNREACH:
902 code = ICMP_NET_UNREACH;
903 IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
904 break;
905 case EACCES:
906 code = ICMP_PKT_FILTERED;
907 break;
908 }
909
910 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
911
912 send = true;
913 if (peer) {
914 now = jiffies;
915 peer->rate_tokens += now - peer->rate_last;
916 if (peer->rate_tokens > ip_rt_error_burst)
917 peer->rate_tokens = ip_rt_error_burst;
918 peer->rate_last = now;
919 if (peer->rate_tokens >= ip_rt_error_cost)
920 peer->rate_tokens -= ip_rt_error_cost;
921 else
922 send = false;
923 inet_putpeer(peer);
924 }
925 if (send)
926 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
927
928 out: kfree_skb(skb);
929 return 0;
930 }
931
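/*
 * Apply a PMTU value learned from ICMP: clamp it to ip_rt_min_pmtu, update
 * the route (or mark it for re-lookup if it carries no cached PMTU yet) and
 * store the value as a FIB nexthop exception so later lookups see it too.
 */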
932 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
933 {
934 struct dst_entry *dst = &rt->dst;
935 struct fib_result res;
936
937 if (dst_metric_locked(dst, RTAX_MTU))
938 return;
939
940 if (dst->dev->mtu < mtu)
941 return;
942
943 if (mtu < ip_rt_min_pmtu)
944 mtu = ip_rt_min_pmtu;
945
946 if (!rt->rt_pmtu) {
947 dst->obsolete = DST_OBSOLETE_KILL;
948 } else {
949 rt->rt_pmtu = mtu;
950 dst->expires = max(1UL, jiffies + ip_rt_mtu_expires);
951 }
952
953 rcu_read_lock();
954 if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) {
955 struct fib_nh *nh = &FIB_RES_NH(res);
956
957 update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
958 jiffies + ip_rt_mtu_expires);
959 }
960 rcu_read_unlock();
961 }
962
963 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
964 struct sk_buff *skb, u32 mtu)
965 {
966 struct rtable *rt = (struct rtable *) dst;
967 struct flowi4 fl4;
968
969 ip_rt_build_flow_key(&fl4, sk, skb);
970 __ip_rt_update_pmtu(rt, &fl4, mtu);
971 }
972
973 void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
974 int oif, u32 mark, u8 protocol, int flow_flags)
975 {
976 const struct iphdr *iph = (const struct iphdr *) skb->data;
977 struct flowi4 fl4;
978 struct rtable *rt;
979
980 __build_flow_key(&fl4, NULL, iph, oif,
981 RT_TOS(iph->tos), protocol, mark, flow_flags);
982 rt = __ip_route_output_key(net, &fl4);
983 if (!IS_ERR(rt)) {
984 __ip_rt_update_pmtu(rt, &fl4, mtu);
985 ip_rt_put(rt);
986 }
987 }
988 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
989
990 static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
991 {
992 const struct iphdr *iph = (const struct iphdr *) skb->data;
993 struct flowi4 fl4;
994 struct rtable *rt;
995
996 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
997 rt = __ip_route_output_key(sock_net(sk), &fl4);
998 if (!IS_ERR(rt)) {
999 __ip_rt_update_pmtu(rt, &fl4, mtu);
1000 ip_rt_put(rt);
1001 }
1002 }
1003
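/*
 * Socket variant of the PMTU update: reuse the socket's cached route when it
 * is still valid, otherwise do a fresh output lookup, and re-attach the
 * (possibly new) route to the socket once the PMTU has been recorded.
 */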
1004 void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1005 {
1006 const struct iphdr *iph = (const struct iphdr *) skb->data;
1007 struct flowi4 fl4;
1008 struct rtable *rt;
1009 struct dst_entry *odst = NULL;
1010 bool new = false;
1011
1012 bh_lock_sock(sk);
1013 odst = sk_dst_get(sk);
1014
1015 if (sock_owned_by_user(sk) || !odst) {
1016 __ipv4_sk_update_pmtu(skb, sk, mtu);
1017 goto out;
1018 }
1019
1020 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
1021
1022 rt = (struct rtable *)odst;
1023 if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
1024 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1025 if (IS_ERR(rt))
1026 goto out;
1027
1028 new = true;
1029 }
1030
1031 __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
1032
1033 if (!dst_check(&rt->dst, 0)) {
1034 if (new)
1035 dst_release(&rt->dst);
1036
1037 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1038 if (IS_ERR(rt))
1039 goto out;
1040
1041 new = true;
1042 }
1043
1044 if (new)
1045 sk_dst_set(sk, &rt->dst);
1046
1047 out:
1048 bh_unlock_sock(sk);
1049 dst_release(odst);
1050 }
1051 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1052
1053 void ipv4_redirect(struct sk_buff *skb, struct net *net,
1054 int oif, u32 mark, u8 protocol, int flow_flags)
1055 {
1056 const struct iphdr *iph = (const struct iphdr *) skb->data;
1057 struct flowi4 fl4;
1058 struct rtable *rt;
1059
1060 __build_flow_key(&fl4, NULL, iph, oif,
1061 RT_TOS(iph->tos), protocol, mark, flow_flags);
1062 rt = __ip_route_output_key(net, &fl4);
1063 if (!IS_ERR(rt)) {
1064 __ip_do_redirect(rt, skb, &fl4, false);
1065 ip_rt_put(rt);
1066 }
1067 }
1068 EXPORT_SYMBOL_GPL(ipv4_redirect);
1069
1070 void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1071 {
1072 const struct iphdr *iph = (const struct iphdr *) skb->data;
1073 struct flowi4 fl4;
1074 struct rtable *rt;
1075
1076 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
1077 rt = __ip_route_output_key(sock_net(sk), &fl4);
1078 if (!IS_ERR(rt)) {
1079 __ip_do_redirect(rt, skb, &fl4, false);
1080 ip_rt_put(rt);
1081 }
1082 }
1083 EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1084
1085 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1086 {
1087 struct rtable *rt = (struct rtable *) dst;
1088
1089 /* All IPV4 dsts are created with ->obsolete set to the value
1090 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1091 * into this function always.
1092 *
1093 * When a PMTU/redirect information update invalidates a
1094 * route, this is indicated by setting obsolete to
1095 * DST_OBSOLETE_KILL.
1096 */
1097 if (dst->obsolete == DST_OBSOLETE_KILL || rt_is_expired(rt))
1098 return NULL;
1099 return dst;
1100 }
1101
1102 static void ipv4_link_failure(struct sk_buff *skb)
1103 {
1104 struct rtable *rt;
1105
1106 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1107
1108 rt = skb_rtable(skb);
1109 if (rt)
1110 dst_set_expires(&rt->dst, 0);
1111 }
1112
1113 static int ip_rt_bug(struct sk_buff *skb)
1114 {
1115 pr_debug("%s: %pI4 -> %pI4, %s\n",
1116 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1117 skb->dev ? skb->dev->name : "?");
1118 kfree_skb(skb);
1119 WARN_ON(1);
1120 return 0;
1121 }
1122
1123 /*
1124 We do not cache the source address of the outgoing interface,
1125 because it is used only by IP RR, TS and SRR options,
1126 so it is out of the fast path.
1127
1128 BTW remember: "addr" is allowed to be unaligned
1129 in IP options!
1130 */
1131
1132 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1133 {
1134 __be32 src;
1135
1136 if (rt_is_output_route(rt))
1137 src = ip_hdr(skb)->saddr;
1138 else {
1139 struct fib_result res;
1140 struct flowi4 fl4;
1141 struct iphdr *iph;
1142
1143 iph = ip_hdr(skb);
1144
1145 memset(&fl4, 0, sizeof(fl4));
1146 fl4.daddr = iph->daddr;
1147 fl4.saddr = iph->saddr;
1148 fl4.flowi4_tos = RT_TOS(iph->tos);
1149 fl4.flowi4_oif = rt->dst.dev->ifindex;
1150 fl4.flowi4_iif = skb->dev->ifindex;
1151 fl4.flowi4_mark = skb->mark;
1152
1153 rcu_read_lock();
1154 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
1155 src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
1156 else
1157 src = inet_select_addr(rt->dst.dev,
1158 rt_nexthop(rt, iph->daddr),
1159 RT_SCOPE_UNIVERSE);
1160 rcu_read_unlock();
1161 }
1162 memcpy(addr, &src, 4);
1163 }
1164
1165 #ifdef CONFIG_IP_ROUTE_CLASSID
1166 static void set_class_tag(struct rtable *rt, u32 tag)
1167 {
1168 if (!(rt->dst.tclassid & 0xFFFF))
1169 rt->dst.tclassid |= tag & 0xFFFF;
1170 if (!(rt->dst.tclassid & 0xFFFF0000))
1171 rt->dst.tclassid |= tag & 0xFFFF0000;
1172 }
1173 #endif
1174
1175 static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1176 {
1177 unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1178
1179 if (advmss == 0) {
1180 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1181 ip_rt_min_advmss);
1182 if (advmss > 65535 - 40)
1183 advmss = 65535 - 40;
1184 }
1185 return advmss;
1186 }
1187
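/*
 * Effective MTU of a route: a still-valid learned PMTU wins, then the
 * RTAX_MTU metric, then the device MTU (clamped to 576 for locked-MTU
 * gatewayed routes and to IP_MAX_MTU overall).
 */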
1188 static unsigned int ipv4_mtu(const struct dst_entry *dst)
1189 {
1190 const struct rtable *rt = (const struct rtable *) dst;
1191 unsigned int mtu = rt->rt_pmtu;
1192
1193 if (!mtu || time_after_eq(jiffies, rt->dst.expires))
1194 mtu = dst_metric_raw(dst, RTAX_MTU);
1195
1196 if (mtu)
1197 return mtu;
1198
1199 mtu = dst->dev->mtu;
1200
1201 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1202 if (rt->rt_uses_gateway && mtu > 576)
1203 mtu = 576;
1204 }
1205
1206 if (mtu > IP_MAX_MTU)
1207 mtu = IP_MAX_MTU;
1208
1209 return mtu;
1210 }
1211
1212 static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
1213 {
1214 struct fnhe_hash_bucket *hash = nh->nh_exceptions;
1215 struct fib_nh_exception *fnhe;
1216 u32 hval;
1217
1218 if (!hash)
1219 return NULL;
1220
1221 hval = fnhe_hashfun(daddr);
1222
1223 for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1224 fnhe = rcu_dereference(fnhe->fnhe_next)) {
1225 if (fnhe->fnhe_daddr == daddr)
1226 return fnhe;
1227 }
1228 return NULL;
1229 }
1230
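/*
 * Attach a freshly built route to its nexthop exception: copy the learned
 * gateway/PMTU into the route and cache the route in fnhe_rth, freeing the
 * route it replaces.  Returns false if the exception no longer matches daddr.
 */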
1231 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1232 __be32 daddr)
1233 {
1234 bool ret = false;
1235
1236 spin_lock_bh(&fnhe_lock);
1237
1238 if (daddr == fnhe->fnhe_daddr) {
1239 struct rtable *orig = rcu_dereference(fnhe->fnhe_rth);
1240 if (orig && rt_is_expired(orig)) {
1241 fnhe->fnhe_gw = 0;
1242 fnhe->fnhe_pmtu = 0;
1243 fnhe->fnhe_expires = 0;
1244 }
1245 if (fnhe->fnhe_pmtu) {
1246 unsigned long expires = fnhe->fnhe_expires;
1247 unsigned long diff = expires - jiffies;
1248
1249 if (time_before(jiffies, expires)) {
1250 rt->rt_pmtu = fnhe->fnhe_pmtu;
1251 dst_set_expires(&rt->dst, diff);
1252 }
1253 }
1254 if (fnhe->fnhe_gw) {
1255 rt->rt_flags |= RTCF_REDIRECTED;
1256 rt->rt_gateway = fnhe->fnhe_gw;
1257 rt->rt_uses_gateway = 1;
1258 } else if (!rt->rt_gateway)
1259 rt->rt_gateway = daddr;
1260
1261 rcu_assign_pointer(fnhe->fnhe_rth, rt);
1262 if (orig)
1263 rt_free(orig);
1264
1265 fnhe->fnhe_stamp = jiffies;
1266 ret = true;
1267 }
1268 spin_unlock_bh(&fnhe_lock);
1269
1270 return ret;
1271 }
1272
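/*
 * Try to publish 'rt' as the cached route of this nexthop (per-CPU slot for
 * output routes, a single slot for input).  On success the displaced route is
 * freed; on a lost cmpxchg race we return false and the caller keeps the
 * route uncached.
 */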
1273 static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
1274 {
1275 struct rtable *orig, *prev, **p;
1276 bool ret = true;
1277
1278 if (rt_is_input_route(rt)) {
1279 p = (struct rtable **)&nh->nh_rth_input;
1280 } else {
1281 p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
1282 }
1283 orig = *p;
1284
1285 prev = cmpxchg(p, orig, rt);
1286 if (prev == orig) {
1287 if (orig)
1288 rt_free(orig);
1289 } else
1290 ret = false;
1291
1292 return ret;
1293 }
1294
1295 static DEFINE_SPINLOCK(rt_uncached_lock);
1296 static LIST_HEAD(rt_uncached_list);
1297
1298 static void rt_add_uncached_list(struct rtable *rt)
1299 {
1300 spin_lock_bh(&rt_uncached_lock);
1301 list_add_tail(&rt->rt_uncached, &rt_uncached_list);
1302 spin_unlock_bh(&rt_uncached_lock);
1303 }
1304
1305 static void ipv4_dst_destroy(struct dst_entry *dst)
1306 {
1307 struct rtable *rt = (struct rtable *) dst;
1308
1309 if (!list_empty(&rt->rt_uncached)) {
1310 spin_lock_bh(&rt_uncached_lock);
1311 list_del(&rt->rt_uncached);
1312 spin_unlock_bh(&rt_uncached_lock);
1313 }
1314 }
1315
1316 void rt_flush_dev(struct net_device *dev)
1317 {
1318 if (!list_empty(&rt_uncached_list)) {
1319 struct net *net = dev_net(dev);
1320 struct rtable *rt;
1321
1322 spin_lock_bh(&rt_uncached_lock);
1323 list_for_each_entry(rt, &rt_uncached_list, rt_uncached) {
1324 if (rt->dst.dev != dev)
1325 continue;
1326 rt->dst.dev = net->loopback_dev;
1327 dev_hold(rt->dst.dev);
1328 dev_put(dev);
1329 }
1330 spin_unlock_bh(&rt_uncached_lock);
1331 }
1332 }
1333
1334 static bool rt_cache_valid(const struct rtable *rt)
1335 {
1336 return rt &&
1337 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1338 !rt_is_expired(rt);
1339 }
1340
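/*
 * Finalize a new route against its FIB result: inherit the gateway and
 * metrics from the nexthop, try to cache the route (in the nexthop or in a
 * matching exception), and fall back to the uncached list when that fails.
 */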
1341 static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1342 const struct fib_result *res,
1343 struct fib_nh_exception *fnhe,
1344 struct fib_info *fi, u16 type, u32 itag)
1345 {
1346 bool cached = false;
1347
1348 if (fi) {
1349 struct fib_nh *nh = &FIB_RES_NH(*res);
1350
1351 if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
1352 rt->rt_gateway = nh->nh_gw;
1353 rt->rt_uses_gateway = 1;
1354 }
1355 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
1356 #ifdef CONFIG_IP_ROUTE_CLASSID
1357 rt->dst.tclassid = nh->nh_tclassid;
1358 #endif
1359 if (unlikely(fnhe))
1360 cached = rt_bind_exception(rt, fnhe, daddr);
1361 else if (!(rt->dst.flags & DST_NOCACHE))
1362 cached = rt_cache_route(nh, rt);
1363 if (unlikely(!cached)) {
1364 /* Routes we intend to cache in nexthop exception or
1365 * FIB nexthop have the DST_NOCACHE bit clear.
1366 * However, if we are unsuccessful at storing this
1367 * route into the cache we really need to set it.
1368 */
1369 rt->dst.flags |= DST_NOCACHE;
1370 if (!rt->rt_gateway)
1371 rt->rt_gateway = daddr;
1372 rt_add_uncached_list(rt);
1373 }
1374 } else
1375 rt_add_uncached_list(rt);
1376
1377 #ifdef CONFIG_IP_ROUTE_CLASSID
1378 #ifdef CONFIG_IP_MULTIPLE_TABLES
1379 set_class_tag(rt, res->tclassid);
1380 #endif
1381 set_class_tag(rt, itag);
1382 #endif
1383 }
1384
1385 static struct rtable *rt_dst_alloc(struct net_device *dev,
1386 bool nopolicy, bool noxfrm, bool will_cache)
1387 {
1388 return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1389 (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
1390 (nopolicy ? DST_NOPOLICY : 0) |
1391 (noxfrm ? DST_NOXFRM : 0));
1392 }
1393
1394 /* called in rcu_read_lock() section */
1395 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1396 u8 tos, struct net_device *dev, int our)
1397 {
1398 struct rtable *rth;
1399 struct in_device *in_dev = __in_dev_get_rcu(dev);
1400 u32 itag = 0;
1401 int err;
1402
1403 /* Primary sanity checks. */
1404
1405 if (in_dev == NULL)
1406 return -EINVAL;
1407
1408 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1409 skb->protocol != htons(ETH_P_IP))
1410 goto e_inval;
1411
1412 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1413 if (ipv4_is_loopback(saddr))
1414 goto e_inval;
1415
1416 if (ipv4_is_zeronet(saddr)) {
1417 if (!ipv4_is_local_multicast(daddr))
1418 goto e_inval;
1419 } else {
1420 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1421 in_dev, &itag);
1422 if (err < 0)
1423 goto e_err;
1424 }
1425 rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
1426 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
1427 if (!rth)
1428 goto e_nobufs;
1429
1430 #ifdef CONFIG_IP_ROUTE_CLASSID
1431 rth->dst.tclassid = itag;
1432 #endif
1433 rth->dst.output = ip_rt_bug;
1434
1435 rth->rt_genid = rt_genid(dev_net(dev));
1436 rth->rt_flags = RTCF_MULTICAST;
1437 rth->rt_type = RTN_MULTICAST;
1438 rth->rt_is_input= 1;
1439 rth->rt_iif = 0;
1440 rth->rt_pmtu = 0;
1441 rth->rt_gateway = 0;
1442 rth->rt_uses_gateway = 0;
1443 INIT_LIST_HEAD(&rth->rt_uncached);
1444 if (our) {
1445 rth->dst.input= ip_local_deliver;
1446 rth->rt_flags |= RTCF_LOCAL;
1447 }
1448
1449 #ifdef CONFIG_IP_MROUTE
1450 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1451 rth->dst.input = ip_mr_input;
1452 #endif
1453 RT_CACHE_STAT_INC(in_slow_mc);
1454
1455 skb_dst_set(skb, &rth->dst);
1456 return 0;
1457
1458 e_nobufs:
1459 return -ENOBUFS;
1460 e_inval:
1461 return -EINVAL;
1462 e_err:
1463 return err;
1464 }
1465
1466
1467 static void ip_handle_martian_source(struct net_device *dev,
1468 struct in_device *in_dev,
1469 struct sk_buff *skb,
1470 __be32 daddr,
1471 __be32 saddr)
1472 {
1473 RT_CACHE_STAT_INC(in_martian_src);
1474 #ifdef CONFIG_IP_ROUTE_VERBOSE
1475 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1476 /*
1477 * RFC1812 recommendation: if the source is martian,
1478 * the only hint is the MAC header.
1479 */
1480 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1481 &daddr, &saddr, dev->name);
1482 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1483 print_hex_dump(KERN_WARNING, "ll header: ",
1484 DUMP_PREFIX_OFFSET, 16, 1,
1485 skb_mac_header(skb),
1486 dev->hard_header_len, true);
1487 }
1488 }
1489 #endif
1490 }
1491
1492 /* called in rcu_read_lock() section */
1493 static int __mkroute_input(struct sk_buff *skb,
1494 const struct fib_result *res,
1495 struct in_device *in_dev,
1496 __be32 daddr, __be32 saddr, u32 tos)
1497 {
1498 struct rtable *rth;
1499 int err;
1500 struct in_device *out_dev;
1501 unsigned int flags = 0;
1502 bool do_cache;
1503 u32 itag = 0;
1504
1505 /* get a working reference to the output device */
1506 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1507 if (out_dev == NULL) {
1508 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1509 return -EINVAL;
1510 }
1511
1512 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1513 in_dev->dev, in_dev, &itag);
1514 if (err < 0) {
1515 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1516 saddr);
1517
1518 goto cleanup;
1519 }
1520
1521 do_cache = res->fi && !itag;
1522 if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1523 skb->protocol == htons(ETH_P_IP) &&
1524 (IN_DEV_SHARED_MEDIA(out_dev) ||
1525 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1526 IPCB(skb)->flags |= IPSKB_DOREDIRECT;
1527
1528 if (skb->protocol != htons(ETH_P_IP)) {
1529 /* Not IP (i.e. ARP). Do not create a route if it is
1530 * invalid for proxy arp. DNAT routes are always valid.
1531 *
1532 * The proxy arp feature has been extended to allow ARP
1533 * replies back to the same interface, to support
1534 * Private VLAN switch technologies. See arp.c.
1535 */
1536 if (out_dev == in_dev &&
1537 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1538 err = -EINVAL;
1539 goto cleanup;
1540 }
1541 }
1542
1543 if (do_cache) {
1544 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1545 if (rt_cache_valid(rth)) {
1546 skb_dst_set_noref(skb, &rth->dst);
1547 goto out;
1548 }
1549 }
1550
1551 rth = rt_dst_alloc(out_dev->dev,
1552 IN_DEV_CONF_GET(in_dev, NOPOLICY),
1553 IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
1554 if (!rth) {
1555 err = -ENOBUFS;
1556 goto cleanup;
1557 }
1558
1559 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
1560 rth->rt_flags = flags;
1561 rth->rt_type = res->type;
1562 rth->rt_is_input = 1;
1563 rth->rt_iif = 0;
1564 rth->rt_pmtu = 0;
1565 rth->rt_gateway = 0;
1566 rth->rt_uses_gateway = 0;
1567 INIT_LIST_HEAD(&rth->rt_uncached);
1568 RT_CACHE_STAT_INC(in_slow_tot);
1569
1570 rth->dst.input = ip_forward;
1571 rth->dst.output = ip_output;
1572
1573 rt_set_nexthop(rth, daddr, res, NULL, res->fi, res->type, itag);
1574 skb_dst_set(skb, &rth->dst);
1575 out:
1576 err = 0;
1577 cleanup:
1578 return err;
1579 }
1580
1581 static int ip_mkroute_input(struct sk_buff *skb,
1582 struct fib_result *res,
1583 const struct flowi4 *fl4,
1584 struct in_device *in_dev,
1585 __be32 daddr, __be32 saddr, u32 tos)
1586 {
1587 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1588 if (res->fi && res->fi->fib_nhs > 1)
1589 fib_select_multipath(res);
1590 #endif
1591
1592 /* create a routing cache entry */
1593 return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
1594 }
1595
1596 /*
1597 * NOTE. We drop all packets that have a local source
1598 * address, because every properly looped-back packet
1599 * must already have the correct destination attached by the output routine.
1600 *
1601 * This approach solves two big problems:
1602 * 1. Non-simplex devices are handled properly.
1603 * 2. IP spoofing attempts are filtered with a 100% guarantee.
1604 * Called with rcu_read_lock().
1605 */
1606
1607 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1608 u8 tos, struct net_device *dev)
1609 {
1610 struct fib_result res;
1611 struct in_device *in_dev = __in_dev_get_rcu(dev);
1612 struct flowi4 fl4;
1613 unsigned int flags = 0;
1614 u32 itag = 0;
1615 struct rtable *rth;
1616 int err = -EINVAL;
1617 struct net *net = dev_net(dev);
1618 bool do_cache;
1619
1620 /* IP on this device is disabled. */
1621
1622 if (!in_dev)
1623 goto out;
1624
1625 /* Check for the most weird martians, which can be not detected
1626 by fib_lookup.
1627 */
1628
1629 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
1630 goto martian_source;
1631
1632 res.fi = NULL;
1633 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1634 goto brd_input;
1635
1636 /* Accept zero addresses only to limited broadcast;
1637 * I do not even know whether to fix it or not. Waiting for complaints :-)
1638 */
1639 if (ipv4_is_zeronet(saddr))
1640 goto martian_source;
1641
1642 if (ipv4_is_zeronet(daddr))
1643 goto martian_destination;
1644
1645 /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
1646 * and calls it at most once, when daddr and/or saddr is a loopback address.
1647 */
1648 if (ipv4_is_loopback(daddr)) {
1649 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1650 goto martian_destination;
1651 } else if (ipv4_is_loopback(saddr)) {
1652 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1653 goto martian_source;
1654 }
1655
1656 /*
1657 * Now we are ready to route packet.
1658 */
1659 fl4.flowi4_oif = 0;
1660 fl4.flowi4_iif = dev->ifindex;
1661 fl4.flowi4_mark = skb->mark;
1662 fl4.flowi4_tos = tos;
1663 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
1664 fl4.daddr = daddr;
1665 fl4.saddr = saddr;
1666 err = fib_lookup(net, &fl4, &res);
1667 if (err != 0)
1668 goto no_route;
1669
1670 if (res.type == RTN_BROADCAST)
1671 goto brd_input;
1672
1673 if (res.type == RTN_LOCAL) {
1674 err = fib_validate_source(skb, saddr, daddr, tos,
1675 LOOPBACK_IFINDEX,
1676 dev, in_dev, &itag);
1677 if (err < 0)
1678 goto martian_source_keep_err;
1679 goto local_input;
1680 }
1681
1682 if (!IN_DEV_FORWARD(in_dev))
1683 goto no_route;
1684 if (res.type != RTN_UNICAST)
1685 goto martian_destination;
1686
1687 err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
1688 out: return err;
1689
1690 brd_input:
1691 if (skb->protocol != htons(ETH_P_IP))
1692 goto e_inval;
1693
1694 if (!ipv4_is_zeronet(saddr)) {
1695 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1696 in_dev, &itag);
1697 if (err < 0)
1698 goto martian_source_keep_err;
1699 }
1700 flags |= RTCF_BROADCAST;
1701 res.type = RTN_BROADCAST;
1702 RT_CACHE_STAT_INC(in_brd);
1703
1704 local_input:
1705 do_cache = false;
1706 if (res.fi) {
1707 if (!itag) {
1708 rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
1709 if (rt_cache_valid(rth)) {
1710 skb_dst_set_noref(skb, &rth->dst);
1711 err = 0;
1712 goto out;
1713 }
1714 do_cache = true;
1715 }
1716 }
1717
1718 rth = rt_dst_alloc(net->loopback_dev,
1719 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
1720 if (!rth)
1721 goto e_nobufs;
1722
1723 rth->dst.input= ip_local_deliver;
1724 rth->dst.output= ip_rt_bug;
1725 #ifdef CONFIG_IP_ROUTE_CLASSID
1726 rth->dst.tclassid = itag;
1727 #endif
1728
1729 rth->rt_genid = rt_genid(net);
1730 rth->rt_flags = flags|RTCF_LOCAL;
1731 rth->rt_type = res.type;
1732 rth->rt_is_input = 1;
1733 rth->rt_iif = 0;
1734 rth->rt_pmtu = 0;
1735 rth->rt_gateway = 0;
1736 rth->rt_uses_gateway = 0;
1737 INIT_LIST_HEAD(&rth->rt_uncached);
1738 RT_CACHE_STAT_INC(in_slow_tot);
1739 if (res.type == RTN_UNREACHABLE) {
1740 rth->dst.input= ip_error;
1741 rth->dst.error= -err;
1742 rth->rt_flags &= ~RTCF_LOCAL;
1743 }
1744 if (do_cache) {
1745 if (unlikely(!rt_cache_route(&FIB_RES_NH(res), rth))) {
1746 rth->dst.flags |= DST_NOCACHE;
1747 rt_add_uncached_list(rth);
1748 }
1749 }
1750 skb_dst_set(skb, &rth->dst);
1751 err = 0;
1752 goto out;
1753
1754 no_route:
1755 RT_CACHE_STAT_INC(in_no_route);
1756 res.type = RTN_UNREACHABLE;
1757 if (err == -ESRCH)
1758 err = -ENETUNREACH;
1759 goto local_input;
1760
1761 /*
1762 * Do not cache martian addresses: they should be logged (RFC1812)
1763 */
1764 martian_destination:
1765 RT_CACHE_STAT_INC(in_martian_dst);
1766 #ifdef CONFIG_IP_ROUTE_VERBOSE
1767 if (IN_DEV_LOG_MARTIANS(in_dev))
1768 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
1769 &daddr, &saddr, dev->name);
1770 #endif
1771
1772 e_inval:
1773 err = -EINVAL;
1774 goto out;
1775
1776 e_nobufs:
1777 err = -ENOBUFS;
1778 goto out;
1779
1780 martian_source:
1781 err = -EINVAL;
1782 martian_source_keep_err:
1783 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
1784 goto out;
1785 }
1786
1787 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1788 u8 tos, struct net_device *dev)
1789 {
1790 int res;
1791
1792 tos &= IPTOS_RT_MASK;
1793 rcu_read_lock();
1794
1795 /* Multicast recognition logic was moved from the route cache to here.
1796 The problem was that too many Ethernet cards have broken/missing
1797 hardware multicast filters :-( As a result, a host on a multicast
1798 network acquires a lot of useless route cache entries, e.g. for
1799 SDR messages from all over the world. Now we try to get rid of them.
1800 Really, provided the software IP multicast filter is organized
1801 reasonably (at least, hashed), it does not result in a slowdown
1802 compared with route cache reject entries.
1803 Note that multicast routers are not affected, because a
1804 route cache entry is created eventually.
1805 */
1806 if (ipv4_is_multicast(daddr)) {
1807 struct in_device *in_dev = __in_dev_get_rcu(dev);
1808
1809 if (in_dev) {
1810 int our = ip_check_mc_rcu(in_dev, daddr, saddr,
1811 ip_hdr(skb)->protocol);
1812 if (our
1813 #ifdef CONFIG_IP_MROUTE
1814 ||
1815 (!ipv4_is_local_multicast(daddr) &&
1816 IN_DEV_MFORWARD(in_dev))
1817 #endif
1818 ) {
1819 int res = ip_route_input_mc(skb, daddr, saddr,
1820 tos, dev, our);
1821 rcu_read_unlock();
1822 return res;
1823 }
1824 }
1825 rcu_read_unlock();
1826 return -EINVAL;
1827 }
1828 res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
1829 rcu_read_unlock();
1830 return res;
1831 }
1832 EXPORT_SYMBOL(ip_route_input_noref);
1833
1834 /* called with rcu_read_lock() */
1835 static struct rtable *__mkroute_output(const struct fib_result *res,
1836 const struct flowi4 *fl4, int orig_oif,
1837 struct net_device *dev_out,
1838 unsigned int flags)
1839 {
1840 struct fib_info *fi = res->fi;
1841 struct fib_nh_exception *fnhe;
1842 struct in_device *in_dev;
1843 u16 type = res->type;
1844 struct rtable *rth;
1845 bool do_cache;
1846
1847 in_dev = __in_dev_get_rcu(dev_out);
1848 if (!in_dev)
1849 return ERR_PTR(-EINVAL);
1850
1851 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1852 if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
1853 return ERR_PTR(-EINVAL);
1854
1855 if (ipv4_is_lbcast(fl4->daddr))
1856 type = RTN_BROADCAST;
1857 else if (ipv4_is_multicast(fl4->daddr))
1858 type = RTN_MULTICAST;
1859 else if (ipv4_is_zeronet(fl4->daddr))
1860 return ERR_PTR(-EINVAL);
1861
1862 if (dev_out->flags & IFF_LOOPBACK)
1863 flags |= RTCF_LOCAL;
1864
1865 do_cache = true;
1866 if (type == RTN_BROADCAST) {
1867 flags |= RTCF_BROADCAST | RTCF_LOCAL;
1868 fi = NULL;
1869 } else if (type == RTN_MULTICAST) {
1870 flags |= RTCF_MULTICAST | RTCF_LOCAL;
1871 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
1872 fl4->flowi4_proto))
1873 flags &= ~RTCF_LOCAL;
1874 else
1875 do_cache = false;
1876 /* If a multicast route does not exist, use
1877 * the default one, but do not use a gateway in this case.
1878 * Yes, it is a hack.
1879 */
1880 if (fi && res->prefixlen < 4)
1881 fi = NULL;
1882 } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
1883 (orig_oif != dev_out->ifindex)) {
1884 /* For local routes that require a particular output interface
1885 * we do not want to cache the result. Caching the result
1886 * causes incorrect behaviour when there are multiple source
1887 * addresses on the interface, the end result being that if the
1888 * intended recipient is waiting on that interface for the
1889 * packet he won't receive it because it will be delivered on
1890 * the loopback interface and the IP_PKTINFO ipi_ifindex will
1891 * be set to the loopback interface as well.
1892 */
1893 fi = NULL;
1894 }
1895
1896 fnhe = NULL;
1897 do_cache &= fi != NULL;
1898 if (do_cache) {
1899 struct rtable __rcu **prth;
1900 struct fib_nh *nh = &FIB_RES_NH(*res);
1901
1902 fnhe = find_exception(nh, fl4->daddr);
1903 if (fnhe)
1904 prth = &fnhe->fnhe_rth;
1905 else {
1906 if (unlikely(fl4->flowi4_flags &
1907 FLOWI_FLAG_KNOWN_NH &&
1908 !(nh->nh_gw &&
1909 nh->nh_scope == RT_SCOPE_LINK))) {
1910 do_cache = false;
1911 goto add;
1912 }
1913 prth = __this_cpu_ptr(nh->nh_pcpu_rth_output);
1914 }
1915 rth = rcu_dereference(*prth);
1916 if (rt_cache_valid(rth)) {
1917 dst_hold(&rth->dst);
1918 return rth;
1919 }
1920 }
1921
1922 add:
1923 rth = rt_dst_alloc(dev_out,
1924 IN_DEV_CONF_GET(in_dev, NOPOLICY),
1925 IN_DEV_CONF_GET(in_dev, NOXFRM),
1926 do_cache);
1927 if (!rth)
1928 return ERR_PTR(-ENOBUFS);
1929
1930 rth->dst.output = ip_output;
1931
1932 rth->rt_genid = rt_genid(dev_net(dev_out));
1933 rth->rt_flags = flags;
1934 rth->rt_type = type;
1935 rth->rt_is_input = 0;
1936 rth->rt_iif = orig_oif ? : 0;
1937 rth->rt_pmtu = 0;
1938 rth->rt_gateway = 0;
1939 rth->rt_uses_gateway = 0;
1940 INIT_LIST_HEAD(&rth->rt_uncached);
1941
1942 RT_CACHE_STAT_INC(out_slow_tot);
1943
1944 if (flags & RTCF_LOCAL)
1945 rth->dst.input = ip_local_deliver;
1946 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
1947 if (flags & RTCF_LOCAL &&
1948 !(dev_out->flags & IFF_LOOPBACK)) {
1949 rth->dst.output = ip_mc_output;
1950 RT_CACHE_STAT_INC(out_slow_mc);
1951 }
1952 #ifdef CONFIG_IP_MROUTE
1953 if (type == RTN_MULTICAST) {
1954 if (IN_DEV_MFORWARD(in_dev) &&
1955 !ipv4_is_local_multicast(fl4->daddr)) {
1956 rth->dst.input = ip_mr_input;
1957 rth->dst.output = ip_mc_output;
1958 }
1959 }
1960 #endif
1961 }
1962
1963 rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
1964
1965 return rth;
1966 }
1967
1968 /*
1969 * Major route resolver routine.
1970 */
1971
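/*
 * Resolution order: validate and interpret the saddr and oif hints, fall back
 * to a loopback route when daddr is empty, consult the FIB (with multipath or
 * default-route selection where applicable) and finally build the dst via
 * __mkroute_output().  Takes rcu_read_lock() internally and may return an
 * ERR_PTR() on failure.
 */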
1972 struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
1973 {
1974 struct net_device *dev_out = NULL;
1975 __u8 tos = RT_FL_TOS(fl4);
1976 unsigned int flags = 0;
1977 struct fib_result res;
1978 struct rtable *rth;
1979 int orig_oif;
1980
1981 res.tclassid = 0;
1982 res.fi = NULL;
1983 res.table = NULL;
1984
1985 orig_oif = fl4->flowi4_oif;
1986
1987 fl4->flowi4_iif = LOOPBACK_IFINDEX;
1988 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
1989 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
1990 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
1991
1992 rcu_read_lock();
1993 if (fl4->saddr) {
1994 rth = ERR_PTR(-EINVAL);
1995 if (ipv4_is_multicast(fl4->saddr) ||
1996 ipv4_is_lbcast(fl4->saddr) ||
1997 ipv4_is_zeronet(fl4->saddr))
1998 goto out;
1999
2000 /* I removed check for oif == dev_out->oif here.
2001 It was wrong for two reasons:
2002 1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2003 is assigned to multiple interfaces.
2004 2. Moreover, we are allowed to send packets with saddr
2005 of another iface. --ANK
2006 */
2007
2008 if (fl4->flowi4_oif == 0 &&
2009 (ipv4_is_multicast(fl4->daddr) ||
2010 ipv4_is_lbcast(fl4->daddr))) {
2011 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2012 dev_out = __ip_dev_find(net, fl4->saddr, false);
2013 if (dev_out == NULL)
2014 goto out;
2015
2016 /* Special hack: user can direct multicasts
2017 and limited broadcast via the necessary interface
2018 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2019 This hack is not just for fun, it allows
2020 vic, vat and friends to work.
2021 They bind the socket to loopback, set ttl to zero
2022 and expect that it will work.
2023 From the viewpoint of the routing cache they are broken,
2024 because we are not allowed to build a multicast path
2025 with a loopback source addr (look, the routing cache
2026 cannot know that ttl is zero, so the packet
2027 will not leave this host and the route is valid).
2028 Luckily, this hack is a good workaround.
2029 */
2030
2031 fl4->flowi4_oif = dev_out->ifindex;
2032 goto make_route;
2033 }
2034
2035 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2036 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2037 if (!__ip_dev_find(net, fl4->saddr, false))
2038 goto out;
2039 }
2040 }
2041
2042
2043 if (fl4->flowi4_oif) {
2044 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2045 rth = ERR_PTR(-ENODEV);
2046 if (dev_out == NULL)
2047 goto out;
2048
2049 /* RACE: Check return value of inet_select_addr instead. */
2050 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2051 rth = ERR_PTR(-ENETUNREACH);
2052 goto out;
2053 }
2054 if (ipv4_is_local_multicast(fl4->daddr) ||
2055 ipv4_is_lbcast(fl4->daddr)) {
2056 if (!fl4->saddr)
2057 fl4->saddr = inet_select_addr(dev_out, 0,
2058 RT_SCOPE_LINK);
2059 goto make_route;
2060 }
2061 if (!fl4->saddr) {
2062 if (ipv4_is_multicast(fl4->daddr))
2063 fl4->saddr = inet_select_addr(dev_out, 0,
2064 fl4->flowi4_scope);
2065 else if (!fl4->daddr)
2066 fl4->saddr = inet_select_addr(dev_out, 0,
2067 RT_SCOPE_HOST);
2068 }
2069 }
2070
2071 if (!fl4->daddr) {
2072 fl4->daddr = fl4->saddr;
2073 if (!fl4->daddr)
2074 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2075 dev_out = net->loopback_dev;
2076 fl4->flowi4_oif = LOOPBACK_IFINDEX;
2077 res.type = RTN_LOCAL;
2078 flags |= RTCF_LOCAL;
2079 goto make_route;
2080 }
2081
2082 if (fib_lookup(net, fl4, &res)) {
2083 res.fi = NULL;
2084 res.table = NULL;
2085 if (fl4->flowi4_oif) {
2086 /* Apparently, the routing tables are wrong. Assume
2087 that the destination is on-link.
2088
2089 WHY? DW.
2090 Because we are allowed to send to an iface
2091 even if it has NO routes and NO assigned
2092 addresses. When oif is specified, the routing
2093 tables are looked up with only one purpose:
2094 to catch whether the destination is gatewayed rather than
2095 direct. Moreover, if MSG_DONTROUTE is set,
2096 we send the packet, ignoring both the routing tables
2097 and the ifaddr state. --ANK
2098
2099
2100 We could do this even if oif is unknown,
2101 likely IPv6, but we do not.
2102 */
2103
2104 if (fl4->saddr == 0)
2105 fl4->saddr = inet_select_addr(dev_out, 0,
2106 RT_SCOPE_LINK);
2107 res.type = RTN_UNICAST;
2108 goto make_route;
2109 }
2110 rth = ERR_PTR(-ENETUNREACH);
2111 goto out;
2112 }
2113
2114 if (res.type == RTN_LOCAL) {
2115 if (!fl4->saddr) {
2116 if (res.fi->fib_prefsrc)
2117 fl4->saddr = res.fi->fib_prefsrc;
2118 else
2119 fl4->saddr = fl4->daddr;
2120 }
2121 dev_out = net->loopback_dev;
2122 fl4->flowi4_oif = dev_out->ifindex;
2123 flags |= RTCF_LOCAL;
2124 goto make_route;
2125 }
2126
2127 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2128 if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
2129 fib_select_multipath(&res);
2130 else
2131 #endif
2132 if (!res.prefixlen &&
2133 res.table->tb_num_default > 1 &&
2134 res.type == RTN_UNICAST && !fl4->flowi4_oif)
2135 fib_select_default(&res);
2136
2137 if (!fl4->saddr)
2138 fl4->saddr = FIB_RES_PREFSRC(net, res);
2139
2140 dev_out = FIB_RES_DEV(res);
2141 fl4->flowi4_oif = dev_out->ifindex;
2142
2143
2144 make_route:
2145 rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);
2146
2147 out:
2148 rcu_read_unlock();
2149 return rth;
2150 }
2151 EXPORT_SYMBOL_GPL(__ip_route_output_key);
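
/*
 * Illustrative sketch: a minimal example of how an in-kernel caller might
 * drive the resolver above.  The helper example_resolve_v4() and its
 * arguments are hypothetical; the flowi4 fields mirror the ones
 * inet_rtm_getroute() fills further down in this file.
 */
#if 0	/* example only */
static int example_resolve_v4(struct net *net, __be32 daddr, __be32 saddr,
			      int oif, u8 tos)
{
	struct flowi4 fl4;
	struct rtable *rt;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = daddr;		/* destination selects the FIB result */
	fl4.saddr = saddr;		/* 0 lets the resolver pick a source */
	fl4.flowi4_oif = oif;		/* 0 means "any interface" */
	fl4.flowi4_tos = tos;

	rt = __ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);	/* -EINVAL, -ENODEV, -ENETUNREACH, ... */

	/* ... use rt->dst, then drop the reference ... */
	ip_rt_put(rt);
	return 0;
}
#endif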
2152
2153 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2154 {
2155 return NULL;
2156 }
2157
2158 static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
2159 {
2160 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2161
2162 return mtu ? : dst->dev->mtu;
2163 }
2164
2165 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
2166 struct sk_buff *skb, u32 mtu)
2167 {
2168 }
2169
2170 static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
2171 struct sk_buff *skb)
2172 {
2173 }
2174
2175 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2176 unsigned long old)
2177 {
2178 return NULL;
2179 }
2180
2181 static struct dst_ops ipv4_dst_blackhole_ops = {
2182 .family = AF_INET,
2183 .protocol = cpu_to_be16(ETH_P_IP),
2184 .check = ipv4_blackhole_dst_check,
2185 .mtu = ipv4_blackhole_mtu,
2186 .default_advmss = ipv4_default_advmss,
2187 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2188 .redirect = ipv4_rt_blackhole_redirect,
2189 .cow_metrics = ipv4_rt_blackhole_cow_metrics,
2190 .neigh_lookup = ipv4_neigh_lookup,
2191 };
2192
2193 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2194 {
2195 struct rtable *ort = (struct rtable *) dst_orig;
2196 struct rtable *rt;
2197
2198 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2199 if (rt) {
2200 struct dst_entry *new = &rt->dst;
2201
2202 new->__use = 1;
2203 new->input = dst_discard;
2204 new->output = dst_discard;
2205
2206 new->dev = ort->dst.dev;
2207 if (new->dev)
2208 dev_hold(new->dev);
2209
2210 rt->rt_is_input = ort->rt_is_input;
2211 rt->rt_iif = ort->rt_iif;
2212 rt->rt_pmtu = ort->rt_pmtu;
2213
2214 rt->rt_genid = rt_genid(net);
2215 rt->rt_flags = ort->rt_flags;
2216 rt->rt_type = ort->rt_type;
2217 rt->rt_gateway = ort->rt_gateway;
2218 rt->rt_uses_gateway = ort->rt_uses_gateway;
2219
2220 INIT_LIST_HEAD(&rt->rt_uncached);
2221
2222 dst_free(new);
2223 }
2224
2225 dst_release(dst_orig);
2226
2227 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2228 }
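
/*
 * Illustrative note: the blackhole dst built above silently discards traffic
 * in both directions (input and output are dst_discard) and refuses metric
 * writes (cow_metrics returns NULL), while still looking like a valid dst to
 * callers.  The wrapper below is a hypothetical sketch of the call pattern.
 */
#if 0	/* example only */
static struct dst_entry *example_make_blackhole(struct net *net,
						struct dst_entry *dst)
{
	/* consumes the reference on @dst, even on failure */
	dst = ipv4_blackhole_route(net, dst);
	if (IS_ERR(dst))
		return NULL;
	return dst;	/* packets routed via this dst are dropped */
}
#endif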
2229
2230 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2231 struct sock *sk)
2232 {
2233 struct rtable *rt = __ip_route_output_key(net, flp4);
2234
2235 if (IS_ERR(rt))
2236 return rt;
2237
2238 if (flp4->flowi4_proto)
2239 rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2240 flowi4_to_flowi(flp4),
2241 sk, 0);
2242
2243 return rt;
2244 }
2245 EXPORT_SYMBOL_GPL(ip_route_output_flow);
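
/*
 * Illustrative sketch: ip_route_output_flow() differs from
 * __ip_route_output_key() only in that a non-zero flowi4_proto sends the
 * result through xfrm_lookup(), so transformation policy can replace the
 * plain route.  The helper below is hypothetical.
 */
#if 0	/* example only */
static struct rtable *example_route_udp(struct net *net, struct sock *sk,
					__be32 daddr, __be16 dport)
{
	struct flowi4 fl4;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = daddr;
	fl4.flowi4_proto = IPPROTO_UDP;	/* triggers the xfrm_lookup() step */
	fl4.fl4_dport = dport;

	return ip_route_output_flow(net, &fl4, sk);
}
#endif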
2246
2247 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2248 struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
2249 u32 seq, int event, int nowait, unsigned int flags)
2250 {
2251 struct rtable *rt = skb_rtable(skb);
2252 struct rtmsg *r;
2253 struct nlmsghdr *nlh;
2254 unsigned long expires = 0;
2255 u32 error;
2256 u32 metrics[RTAX_MAX];
2257
2258 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
2259 if (nlh == NULL)
2260 return -EMSGSIZE;
2261
2262 r = nlmsg_data(nlh);
2263 r->rtm_family = AF_INET;
2264 r->rtm_dst_len = 32;
2265 r->rtm_src_len = 0;
2266 r->rtm_tos = fl4->flowi4_tos;
2267 r->rtm_table = RT_TABLE_MAIN;
2268 if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
2269 goto nla_put_failure;
2270 r->rtm_type = rt->rt_type;
2271 r->rtm_scope = RT_SCOPE_UNIVERSE;
2272 r->rtm_protocol = RTPROT_UNSPEC;
2273 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2274 if (rt->rt_flags & RTCF_NOTIFY)
2275 r->rtm_flags |= RTM_F_NOTIFY;
2276 if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2277 r->rtm_flags |= RTCF_DOREDIRECT;
2278
2279 if (nla_put_be32(skb, RTA_DST, dst))
2280 goto nla_put_failure;
2281 if (src) {
2282 r->rtm_src_len = 32;
2283 if (nla_put_be32(skb, RTA_SRC, src))
2284 goto nla_put_failure;
2285 }
2286 if (rt->dst.dev &&
2287 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2288 goto nla_put_failure;
2289 #ifdef CONFIG_IP_ROUTE_CLASSID
2290 if (rt->dst.tclassid &&
2291 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2292 goto nla_put_failure;
2293 #endif
2294 if (!rt_is_input_route(rt) &&
2295 fl4->saddr != src) {
2296 if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
2297 goto nla_put_failure;
2298 }
2299 if (rt->rt_uses_gateway &&
2300 nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
2301 goto nla_put_failure;
2302
2303 expires = rt->dst.expires;
2304 if (expires) {
2305 unsigned long now = jiffies;
2306
2307 if (time_before(now, expires))
2308 expires -= now;
2309 else
2310 expires = 0;
2311 }
2312
2313 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2314 if (rt->rt_pmtu && expires)
2315 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2316 if (rtnetlink_put_metrics(skb, metrics) < 0)
2317 goto nla_put_failure;
2318
2319 if (fl4->flowi4_mark &&
2320 nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
2321 goto nla_put_failure;
2322
2323 error = rt->dst.error;
2324
2325 if (rt_is_input_route(rt)) {
2326 #ifdef CONFIG_IP_MROUTE
2327 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2328 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2329 int err = ipmr_get_route(net, skb,
2330 fl4->saddr, fl4->daddr,
2331 r, nowait, portid);
2332
2333 if (err <= 0) {
2334 if (!nowait) {
2335 if (err == 0)
2336 return 0;
2337 goto nla_put_failure;
2338 } else {
2339 if (err == -EMSGSIZE)
2340 goto nla_put_failure;
2341 error = err;
2342 }
2343 }
2344 } else
2345 #endif
2346 if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
2347 goto nla_put_failure;
2348 }
2349
2350 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
2351 goto nla_put_failure;
2352
2353 return nlmsg_end(skb, nlh);
2354
2355 nla_put_failure:
2356 nlmsg_cancel(skb, nlh);
2357 return -EMSGSIZE;
2358 }
2359
2360 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
2361 {
2362 struct net *net = sock_net(in_skb->sk);
2363 struct rtmsg *rtm;
2364 struct nlattr *tb[RTA_MAX+1];
2365 struct rtable *rt = NULL;
2366 struct flowi4 fl4;
2367 __be32 dst = 0;
2368 __be32 src = 0;
2369 u32 iif;
2370 int err;
2371 int mark;
2372 struct sk_buff *skb;
2373
2374 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2375 if (err < 0)
2376 goto errout;
2377
2378 rtm = nlmsg_data(nlh);
2379
2380 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2381 if (skb == NULL) {
2382 err = -ENOBUFS;
2383 goto errout;
2384 }
2385
2386 /* Reserve room for dummy headers; this skb can pass
2387 through a good chunk of the routing engine.
2388 */
2389 skb_reset_mac_header(skb);
2390 skb_reset_network_header(skb);
2391
2392 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2393 ip_hdr(skb)->protocol = IPPROTO_ICMP;
2394 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2395
2396 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2397 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
2398 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2399 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
2400
2401 memset(&fl4, 0, sizeof(fl4));
2402 fl4.daddr = dst;
2403 fl4.saddr = src;
2404 fl4.flowi4_tos = rtm->rtm_tos;
2405 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
2406 fl4.flowi4_mark = mark;
2407
2408 if (iif) {
2409 struct net_device *dev;
2410
2411 dev = __dev_get_by_index(net, iif);
2412 if (dev == NULL) {
2413 err = -ENODEV;
2414 goto errout_free;
2415 }
2416
2417 skb->protocol = htons(ETH_P_IP);
2418 skb->dev = dev;
2419 skb->mark = mark;
2420 local_bh_disable();
2421 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2422 local_bh_enable();
2423
2424 rt = skb_rtable(skb);
2425 if (err == 0 && rt->dst.error)
2426 err = -rt->dst.error;
2427 } else {
2428 rt = ip_route_output_key(net, &fl4);
2429
2430 err = 0;
2431 if (IS_ERR(rt))
2432 err = PTR_ERR(rt);
2433 }
2434
2435 if (err)
2436 goto errout_free;
2437
2438 skb_dst_set(skb, &rt->dst);
2439 if (rtm->rtm_flags & RTM_F_NOTIFY)
2440 rt->rt_flags |= RTCF_NOTIFY;
2441
2442 err = rt_fill_info(net, dst, src, &fl4, skb,
2443 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2444 RTM_NEWROUTE, 0, 0);
2445 if (err <= 0)
2446 goto errout_free;
2447
2448 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2449 errout:
2450 return err;
2451
2452 errout_free:
2453 kfree_skb(skb);
2454 goto errout;
2455 }
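
/*
 * Illustrative sketch: a minimal user-space program that exercises
 * inet_rtm_getroute() above, roughly what "ip route get <addr>" does.
 * Error handling is trimmed and the destination address is an arbitrary
 * example; the request carries the same RTA_DST attribute the handler
 * parses, and the reply is the RTM_NEWROUTE message built by rt_fill_info().
 */
#if 0	/* example only, user space */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
		char attrs[64];
	} req;
	struct rtattr *rta;
	struct in_addr dst;
	char buf[4096];
	int fd, len;

	inet_pton(AF_INET, "192.0.2.1", &dst);

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.rtm.rtm_family = AF_INET;
	req.rtm.rtm_dst_len = 32;

	/* append RTA_DST with the destination address */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = RTA_DST;
	rta->rta_len = RTA_LENGTH(sizeof(dst));
	memcpy(RTA_DATA(rta), &dst, sizeof(dst));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + rta->rta_len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	send(fd, &req, req.nlh.nlmsg_len, 0);	/* to the kernel (pid 0) */

	len = recv(fd, buf, sizeof(buf), 0);	/* RTM_NEWROUTE reply */
	printf("received %d bytes of route information\n", len);
	close(fd);
	return 0;
}
#endif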
2456
2457 int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2458 {
2459 return skb->len;
2460 }
2461
2462 void ip_rt_multicast_event(struct in_device *in_dev)
2463 {
2464 rt_cache_flush(dev_net(in_dev->dev));
2465 }
2466
2467 #ifdef CONFIG_SYSCTL
2468 static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
2469 static int ip_rt_gc_interval __read_mostly = 60 * HZ;
2470 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
2471 static int ip_rt_gc_elasticity __read_mostly = 8;
2472
2473 static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
2474 void __user *buffer,
2475 size_t *lenp, loff_t *ppos)
2476 {
2477 if (write) {
2478 rt_cache_flush((struct net *)__ctl->extra1);
2479 return 0;
2480 }
2481
2482 return -EINVAL;
2483 }
2484
2485 static ctl_table ipv4_route_table[] = {
2486 {
2487 .procname = "gc_thresh",
2488 .data = &ipv4_dst_ops.gc_thresh,
2489 .maxlen = sizeof(int),
2490 .mode = 0644,
2491 .proc_handler = proc_dointvec,
2492 },
2493 {
2494 .procname = "max_size",
2495 .data = &ip_rt_max_size,
2496 .maxlen = sizeof(int),
2497 .mode = 0644,
2498 .proc_handler = proc_dointvec,
2499 },
2500 {
2501 /* Deprecated. Use gc_min_interval_ms */
2502
2503 .procname = "gc_min_interval",
2504 .data = &ip_rt_gc_min_interval,
2505 .maxlen = sizeof(int),
2506 .mode = 0644,
2507 .proc_handler = proc_dointvec_jiffies,
2508 },
2509 {
2510 .procname = "gc_min_interval_ms",
2511 .data = &ip_rt_gc_min_interval,
2512 .maxlen = sizeof(int),
2513 .mode = 0644,
2514 .proc_handler = proc_dointvec_ms_jiffies,
2515 },
2516 {
2517 .procname = "gc_timeout",
2518 .data = &ip_rt_gc_timeout,
2519 .maxlen = sizeof(int),
2520 .mode = 0644,
2521 .proc_handler = proc_dointvec_jiffies,
2522 },
2523 {
2524 .procname = "gc_interval",
2525 .data = &ip_rt_gc_interval,
2526 .maxlen = sizeof(int),
2527 .mode = 0644,
2528 .proc_handler = proc_dointvec_jiffies,
2529 },
2530 {
2531 .procname = "redirect_load",
2532 .data = &ip_rt_redirect_load,
2533 .maxlen = sizeof(int),
2534 .mode = 0644,
2535 .proc_handler = proc_dointvec,
2536 },
2537 {
2538 .procname = "redirect_number",
2539 .data = &ip_rt_redirect_number,
2540 .maxlen = sizeof(int),
2541 .mode = 0644,
2542 .proc_handler = proc_dointvec,
2543 },
2544 {
2545 .procname = "redirect_silence",
2546 .data = &ip_rt_redirect_silence,
2547 .maxlen = sizeof(int),
2548 .mode = 0644,
2549 .proc_handler = proc_dointvec,
2550 },
2551 {
2552 .procname = "error_cost",
2553 .data = &ip_rt_error_cost,
2554 .maxlen = sizeof(int),
2555 .mode = 0644,
2556 .proc_handler = proc_dointvec,
2557 },
2558 {
2559 .procname = "error_burst",
2560 .data = &ip_rt_error_burst,
2561 .maxlen = sizeof(int),
2562 .mode = 0644,
2563 .proc_handler = proc_dointvec,
2564 },
2565 {
2566 .procname = "gc_elasticity",
2567 .data = &ip_rt_gc_elasticity,
2568 .maxlen = sizeof(int),
2569 .mode = 0644,
2570 .proc_handler = proc_dointvec,
2571 },
2572 {
2573 .procname = "mtu_expires",
2574 .data = &ip_rt_mtu_expires,
2575 .maxlen = sizeof(int),
2576 .mode = 0644,
2577 .proc_handler = proc_dointvec_jiffies,
2578 },
2579 {
2580 .procname = "min_pmtu",
2581 .data = &ip_rt_min_pmtu,
2582 .maxlen = sizeof(int),
2583 .mode = 0644,
2584 .proc_handler = proc_dointvec,
2585 },
2586 {
2587 .procname = "min_adv_mss",
2588 .data = &ip_rt_min_advmss,
2589 .maxlen = sizeof(int),
2590 .mode = 0644,
2591 .proc_handler = proc_dointvec,
2592 },
2593 { }
2594 };
2595
2596 static struct ctl_table ipv4_route_flush_table[] = {
2597 {
2598 .procname = "flush",
2599 .maxlen = sizeof(int),
2600 .mode = 0200,
2601 .proc_handler = ipv4_sysctl_rtcache_flush,
2602 },
2603 { },
2604 };
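
/*
 * Illustrative sketch: any write to the "flush" entry registered above
 * reaches ipv4_sysctl_rtcache_flush() and flushes the per-namespace cache;
 * reads return -EINVAL.  The user-space helper below is hypothetical and the
 * written value is ignored by the handler.
 */
#if 0	/* example only, user space */
#include <fcntl.h>
#include <unistd.h>

static int flush_ipv4_routes(void)
{
	int fd = open("/proc/sys/net/ipv4/route/flush", O_WRONLY);

	if (fd < 0)
		return -1;
	(void)write(fd, "1", 1);	/* the value written is not parsed */
	return close(fd);
}
#endif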
2605
2606 static __net_init int sysctl_route_net_init(struct net *net)
2607 {
2608 struct ctl_table *tbl;
2609
2610 tbl = ipv4_route_flush_table;
2611 if (!net_eq(net, &init_net)) {
2612 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
2613 if (tbl == NULL)
2614 goto err_dup;
2615
2616 /* Don't export sysctls to unprivileged users */
2617 if (net->user_ns != &init_user_ns)
2618 tbl[0].procname = NULL;
2619 }
2620 tbl[0].extra1 = net;
2621
2622 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
2623 if (net->ipv4.route_hdr == NULL)
2624 goto err_reg;
2625 return 0;
2626
2627 err_reg:
2628 if (tbl != ipv4_route_flush_table)
2629 kfree(tbl);
2630 err_dup:
2631 return -ENOMEM;
2632 }
2633
2634 static __net_exit void sysctl_route_net_exit(struct net *net)
2635 {
2636 struct ctl_table *tbl;
2637
2638 tbl = net->ipv4.route_hdr->ctl_table_arg;
2639 unregister_net_sysctl_table(net->ipv4.route_hdr);
2640 BUG_ON(tbl == ipv4_route_flush_table);
2641 kfree(tbl);
2642 }
2643
2644 static __net_initdata struct pernet_operations sysctl_route_ops = {
2645 .init = sysctl_route_net_init,
2646 .exit = sysctl_route_net_exit,
2647 };
2648 #endif
2649
2650 static __net_init int rt_genid_init(struct net *net)
2651 {
2652 atomic_set(&net->rt_genid, 0);
2653 get_random_bytes(&net->ipv4.dev_addr_genid,
2654 sizeof(net->ipv4.dev_addr_genid));
2655 return 0;
2656 }
2657
2658 static __net_initdata struct pernet_operations rt_genid_ops = {
2659 .init = rt_genid_init,
2660 };
2661
2662 static int __net_init ipv4_inetpeer_init(struct net *net)
2663 {
2664 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
2665
2666 if (!bp)
2667 return -ENOMEM;
2668 inet_peer_base_init(bp);
2669 net->ipv4.peers = bp;
2670 return 0;
2671 }
2672
2673 static void __net_exit ipv4_inetpeer_exit(struct net *net)
2674 {
2675 struct inet_peer_base *bp = net->ipv4.peers;
2676
2677 net->ipv4.peers = NULL;
2678 inetpeer_invalidate_tree(bp);
2679 kfree(bp);
2680 }
2681
2682 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
2683 .init = ipv4_inetpeer_init,
2684 .exit = ipv4_inetpeer_exit,
2685 };
2686
2687 #ifdef CONFIG_IP_ROUTE_CLASSID
2688 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
2689 #endif /* CONFIG_IP_ROUTE_CLASSID */
2690
2691 int __init ip_rt_init(void)
2692 {
2693 int rc = 0;
2694
2695 ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
2696 if (!ip_idents)
2697 panic("IP: failed to allocate ip_idents\n");
2698
2699 prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
2700
2701 #ifdef CONFIG_IP_ROUTE_CLASSID
2702 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
2703 if (!ip_rt_acct)
2704 panic("IP: failed to allocate ip_rt_acct\n");
2705 #endif
2706
2707 ipv4_dst_ops.kmem_cachep =
2708 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
2709 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2710
2711 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
2712
2713 if (dst_entries_init(&ipv4_dst_ops) < 0)
2714 panic("IP: failed to allocate ipv4_dst_ops counter\n");
2715
2716 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
2717 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
2718
2719 ipv4_dst_ops.gc_thresh = ~0;
2720 ip_rt_max_size = INT_MAX;
2721
2722 devinet_init();
2723 ip_fib_init();
2724
2725 if (ip_rt_proc_init())
2726 pr_err("Unable to create route proc files\n");
2727 #ifdef CONFIG_XFRM
2728 xfrm_init();
2729 xfrm4_init();
2730 #endif
2731 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
2732
2733 #ifdef CONFIG_SYSCTL
2734 register_pernet_subsys(&sysctl_route_ops);
2735 #endif
2736 register_pernet_subsys(&rt_genid_ops);
2737 register_pernet_subsys(&ipv4_inetpeer_ops);
2738 return rc;
2739 }
2740
2741 #ifdef CONFIG_SYSCTL
2742 /*
2743 * We really need to sanitize the damn ipv4 init order, then all
2744 * this nonsense will go away.
2745 */
2746 void __init ip_static_sysctl_init(void)
2747 {
2748 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
2749 }
2750 #endif