net/ipv4/route.c, from LineageOS/android_kernel_samsung_universal7580, commit 292c08985b0f5e055e5ebec6a0327b7419f7a818

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD;
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>

#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;

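/* Illustrative numbers, not normative: with the common HZ == 100 these
 * defaults mean a host is sent at most ip_rt_redirect_number (9) ICMP
 * redirects with exponentially growing gaps (ip_rt_redirect_load == 2
 * jiffies, doubled per redirect sent), and the counter resets only after
 * ip_rt_redirect_silence (2048 jiffies, ~20s) without redirect-worthy
 * traffic. Likewise ip_rt_error_burst / ip_rt_error_cost == 5, so at most
 * ~5 back-to-back ICMP errors are sent to one peer before the token
 * bucket throttles to roughly one per second.
 */
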
/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		 ipv4_dst_destroy(struct dst_entry *dst);

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
}

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.redirect =		ip_do_redirect,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
};

#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
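
/* How this table is indexed (a sketch based on rt_tos2priority() in
 * include/net/route.h, which is not defined in this file): the four TOS
 * bits of the IP header's tos byte are masked and shifted down one, so
 * each pair of entries covers one TOS value with and without the low
 * (historical "minimize cost") bit. E.g. tos 0x10 (IPTOS_LOWDELAY)
 * indexes entry 8, TC_PRIO_INTERACTIVE.
 */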

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)

#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start = rt_cache_seq_start,
	.next  = rt_cache_seq_next,
	.stop  = rt_cache_seq_stop,
	.show  = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cache_seq_ops);
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;

}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   st->in_hit,
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   st->out_hit,
		   st->out_slow_tot,
		   st->out_slow_mc,

		   st->gc_total,
		   st->gc_ignored,
		   st->gc_goal_miss,
		   st->gc_dst_overflow,
		   st->in_hlist_search,
		   st->out_hlist_search
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start = rt_cpu_seq_start,
	.next  = rt_cpu_seq_next,
	.stop  = rt_cpu_seq_stop,
	.show  = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}

static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_acct_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
			  &rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}

void rt_cache_flush(struct net *net)
{
	rt_genid_bump(net);
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;
	const struct rtable *rt;
	struct neighbour *n;

	rt = (const struct rtable *) dst;
	if (rt->rt_gateway)
		pkey = (const __be32 *) &rt->rt_gateway;
	else if (skb)
		pkey = &ip_hdr(skb)->daddr;

	n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
	if (n)
		return n;
	return neigh_create(&arp_tbl, pkey, dev);
}

#define IP_IDENTS_SZ 2048u
struct ip_ident_bucket {
	atomic_t	id;
	u32		stamp32;
};

static struct ip_ident_bucket *ip_idents __read_mostly;

/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
	struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
	u32 old = ACCESS_ONCE(bucket->stamp32);
	u32 now = (u32)jiffies;
	u32 delta = 0;

	if (old != now && cmpxchg(&bucket->stamp32, old, now) == old) {
		u64 x = prandom_u32();

		x *= (now - old);
		delta = (u32)(x >> 32);
	}

	return atomic_add_return(segs + delta, &bucket->id) - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);
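
/* A worked example of the perturbation above (illustrative values): if a
 * bucket was last used 1000 jiffies ago, then now - old == 1000 and
 * delta = (prandom_u32() * 1000) >> 32, i.e. a value uniformly spread
 * over [0, 1000). The IP ID counter therefore jumps by a random amount
 * bounded by the idle time, so an observer cannot count the packets sent
 * in between, while a busy bucket (old == now) keeps strictly sequential
 * IDs and pays no extra cost.
 */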

void __ip_select_ident(struct iphdr *iph, int segs)
{
	static u32 ip_idents_hashrnd __read_mostly;
	static bool hashrnd_initialized = false;
	u32 hash, id;

	if (unlikely(!hashrnd_initialized)) {
		hashrnd_initialized = true;
		get_random_bytes(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
	}

	hash = jhash_3words((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
			    iph->protocol,
			    ip_idents_hashrnd);
	id = ip_idents_reserve(hash, segs);
	iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
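
/* Note on the bucketing (a reading of the code above, not new behaviour):
 * the (daddr, saddr, protocol) triple is hashed with a boot-time secret
 * into one of IP_IDENTS_SZ (2048) shared counters, so unrelated flows may
 * share a bucket but an off-path attacker cannot predict which one. The
 * segs argument lets segmentation offload reserve a contiguous run of IDs
 * in a single atomic operation.
 */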

static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
			     const struct sock *sk,
			     const struct iphdr *iph,
			     int oif, u8 tos,
			     u8 prot, u32 mark, int flow_flags)
{
	if (sk) {
		const struct inet_sock *inet = inet_sk(sk);

		oif = sk->sk_bound_dev_if;
		mark = sk->sk_mark;
		tos = RT_CONN_FLAGS(sk);
		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
	}
	flowi4_init_output(fl4, oif, mark, tos,
			   RT_SCOPE_UNIVERSE, prot,
			   flow_flags,
			   iph->daddr, iph->saddr, 0, 0,
			   sock_net_uid(net, sk));
}

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	__build_flow_key(sock_net(sk), fl4, sk, iph, oif, tos, prot, mark, 0);
}

static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
	rcu_read_unlock();
}

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}

static inline void rt_free(struct rtable *rt)
{
	call_rcu(&rt->dst.rcu_head, dst_rcu_free);
}

static DEFINE_SPINLOCK(fnhe_lock);

static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception *fnhe, *oldest;
	struct rtable *orig;

	oldest = rcu_dereference(hash->chain);
	for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
			oldest = fnhe;
	}
	orig = rcu_dereference(oldest->fnhe_rth);
	if (orig) {
		RCU_INIT_POINTER(oldest->fnhe_rth, NULL);
		rt_free(orig);
	}
	return oldest;
}

static inline u32 fnhe_hashfun(__be32 daddr)
{
	u32 hval;

	hval = (__force u32) daddr;
	hval ^= (hval >> 11) ^ (hval >> 22);

	return hval & (FNHE_HASH_SIZE - 1);
}
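
/* Worked example for fnhe_hashfun() (illustrative, and assuming the usual
 * power-of-two FNHE_HASH_SIZE, so that the mask keeps only the low bits):
 * for daddr 192.0.2.1 the raw value is 0xc0000201; XOR-folding in the
 * >>11 and >>22 shifts mixes the high bits of the address into the low
 * bits that survive the final mask, so destinations differing only in
 * their network prefix still land in different buckets.
 */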

static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
				  u32 pmtu, unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	int depth;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = nh->nh_exceptions;
	if (!hash) {
		hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		nh->nh_exceptions = hash;
	}

	hash += hval;

	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			fnhe->fnhe_expires = expires;
		}
	} else {
		if (depth > FNHE_RECLAIM_DEPTH)
			fnhe = fnhe_oldest(hash);
		else {
			fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
			if (!fnhe)
				goto out_unlock;

			fnhe->fnhe_next = hash->chain;
			rcu_assign_pointer(hash->chain, fnhe);
		}
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_expires = expires;
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
	return;
}
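
/* Chain-length control, as read from the loop above: a new exception is
 * only allocated while the per-bucket chain is at most FNHE_RECLAIM_DEPTH
 * entries deep; past that, fnhe_oldest() recycles the least recently
 * stamped entry in place instead of letting one busy bucket grow without
 * bound. This is what keeps a flood of ICMP-induced exceptions (e.g.
 * forged PMTU updates) from consuming unbounded memory.
 */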

static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	if (rt->rt_gateway != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
	if (!n)
		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
	if (!IS_ERR(n)) {
		if (!(n->nud_state & NUD_VALID)) {
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res) == 0) {
				struct fib_nh *nh = &FIB_RES_NH(res);

				update_or_create_fnhe(nh, fl4->daddr, new_gw,
						      0, 0);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}

static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	rt = (struct rtable *) dst;

	__build_flow_key(sock_net(sk), &fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->dst.expires) {
			ip_rt_put(rt);
			ret = NULL;
		}
	}
	return ret;
}

/*
 * Algorithm:
 * 1. The first ip_rt_redirect_number redirects are sent
 *    with exponential backoff, then we stop sending them at all,
 *    assuming that the host ignores our redirects.
 * 2. If we did not see packets requiring redirects
 *    during ip_rt_redirect_silence, we assume that the host
 *    has forgotten the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
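
/* Backoff timeline sketch (illustrative, assuming HZ == 100 and the
 * default tunables above): the first redirect to a peer is sent
 * immediately; the next is allowed only once jiffies passes
 * rate_last + (ip_rt_redirect_load << rate_tokens), giving gaps of
 * 4, 8, 16, ... 512 jiffies. After the 9th redirect the peer is assumed
 * deaf until it stays quiet for ip_rt_redirect_silence (2048 jiffies).
 */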

void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
		peer->rate_tokens = 0;

	/* Too many ignored redirects; do not send anything.
	 * Set peer->rate_last to the last seen redirected packet.
	 */
	if (peer->rate_tokens >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
#endif
	}
out_put_peer:
	inet_putpeer(peer);
}

static int ip_error(struct sk_buff *skb)
{
	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
	struct rtable *rt = skb_rtable(skb);
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	bool send;
	int code;

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}
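
/* The rate limiter above is a classic token bucket, shown here in
 * isolation (a sketch with hypothetical names, not kernel code):
 *
 *	tokens += now - last;                // refill: one token per jiffy
 *	if (tokens > ip_rt_error_burst)      // cap the burst (5 * HZ)
 *		tokens = ip_rt_error_burst;
 *	if (tokens >= ip_rt_error_cost)      // spend HZ tokens per ICMP
 *		send_icmp();
 *
 * With the defaults this allows a burst of ~5 ICMP errors to one peer,
 * then sustains about one per second.
 */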

static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	struct fib_result res;

	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	if (dst->dev->mtu < mtu)
		return;

	if (mtu < ip_rt_min_pmtu)
		mtu = ip_rt_min_pmtu;

	if (!rt->rt_pmtu) {
		dst->obsolete = DST_OBSOLETE_KILL;
	} else {
		rt->rt_pmtu = mtu;
		dst->expires = max(1UL, jiffies + ip_rt_mtu_expires);
	}

	rcu_read_lock();
	if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) {
		struct fib_nh *nh = &FIB_RES_NH(res);

		update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
				      jiffies + ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}

static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct flowi4 fl4;

	ip_rt_build_flow_key(&fl4, sk, skb);
	__ip_rt_update_pmtu(rt, &fl4, mtu);
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u32 mark, u8 protocol, int flow_flags)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	if (!mark)
		mark = IP4_REPLY_MARK(net, skb->mark);

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, flow_flags);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);

static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

	if (!fl4.flowi4_mark)
		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);
	odst = sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = (struct rtable *)odst;
	if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);

	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);

void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u32 mark, u8 protocol, int flow_flags)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, flow_flags);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct net *net = sock_net(sk);

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	/* All IPv4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a
	 * route, this is indicated by setting obsolete to
	 * DST_OBSOLETE_KILL.
	 */
	if (dst->obsolete == DST_OBSOLETE_KILL || rt_is_expired(rt))
		return NULL;
	return dst;
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}

/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so it is out of the fast path.
 *
 * BTW remember: "addr" is allowed to be not aligned
 * in IP options!
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct flowi4 fl4;
		struct iphdr *iph;

		iph = ip_hdr(skb);

		memset(&fl4, 0, sizeof(fl4));
		fl4.daddr = iph->daddr;
		fl4.saddr = iph->saddr;
		fl4.flowi4_tos = RT_TOS(iph->tos);
		fl4.flowi4_oif = rt->dst.dev->ifindex;
		fl4.flowi4_iif = skb->dev->ifindex;
		fl4.flowi4_mark = skb->mark;

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
			src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (advmss == 0) {
		advmss = max_t(unsigned int, dst->dev->mtu - 40,
			       ip_rt_min_advmss);
		if (advmss > 65535 - 40)
			advmss = 65535 - 40;
	}
	return advmss;
}
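
/* Worked numbers for the advmss fallback above: the 40 bytes are the
 * minimal IPv4 (20) plus TCP (20) headers, so a plain 1500-byte Ethernet
 * MTU advertises an MSS of 1460, and the ip_rt_min_advmss floor (256)
 * only matters on links with an MTU below 296.
 */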

static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *) dst;
	unsigned int mtu = rt->rt_pmtu;

	if (!mtu || time_after_eq(jiffies, rt->dst.expires))
		mtu = dst_metric_raw(dst, RTAX_MTU);

	if (mtu)
		return mtu;

	mtu = dst->dev->mtu;

	if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
		if (rt->rt_uses_gateway && mtu > 576)
			mtu = 576;
	}

	if (mtu > IP_MAX_MTU)
		mtu = IP_MAX_MTU;

	return mtu;
}
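
/* Precedence, as implemented above: a live (unexpired) learned PMTU wins,
 * then an explicit RTAX_MTU route metric, then the device MTU. In the
 * device-MTU case a locked-MTU route that goes through a gateway is
 * clamped to the classic 576-byte default, and every result is capped at
 * IP_MAX_MTU (0xFFF0).
 */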

static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
	struct fnhe_hash_bucket *hash = nh->nh_exceptions;
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			return fnhe;
	}
	return NULL;
}

static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable *orig = rcu_dereference(fnhe->fnhe_rth);
		if (orig && rt_is_expired(orig)) {
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
		}
		if (fnhe->fnhe_pmtu) {
			unsigned long expires = fnhe->fnhe_expires;
			unsigned long diff = expires - jiffies;

			if (time_before(jiffies, expires)) {
				rt->rt_pmtu = fnhe->fnhe_pmtu;
				dst_set_expires(&rt->dst, diff);
			}
		}
		if (fnhe->fnhe_gw) {
			rt->rt_flags |= RTCF_REDIRECTED;
			rt->rt_gateway = fnhe->fnhe_gw;
			rt->rt_uses_gateway = 1;
		} else if (!rt->rt_gateway)
			rt->rt_gateway = daddr;

		rcu_assign_pointer(fnhe->fnhe_rth, rt);
		if (orig)
			rt_free(orig);

		fnhe->fnhe_stamp = jiffies;
		ret = true;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}

static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nh->nh_rth_input;
	} else {
		p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
	}
	orig = *p;

	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig)
			rt_free(orig);
	} else
		ret = false;

	return ret;
}
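
/* Why cmpxchg here instead of a lock: publishing the cached route is a
 * single pointer swap. On success the route it displaced is released
 * through RCU (rt_free), so readers that already dereferenced the old
 * pointer remain safe; on failure (another CPU won the race) we just
 * report false and the caller keeps its route out of the cache with
 * DST_NOCACHE.
 */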

static DEFINE_SPINLOCK(rt_uncached_lock);
static LIST_HEAD(rt_uncached_list);

static void rt_add_uncached_list(struct rtable *rt)
{
	spin_lock_bh(&rt_uncached_lock);
	list_add_tail(&rt->rt_uncached, &rt_uncached_list);
	spin_unlock_bh(&rt_uncached_lock);
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *) dst;

	if (!list_empty(&rt->rt_uncached)) {
		spin_lock_bh(&rt_uncached_lock);
		list_del(&rt->rt_uncached);
		spin_unlock_bh(&rt_uncached_lock);
	}
}

void rt_flush_dev(struct net_device *dev)
{
	if (!list_empty(&rt_uncached_list)) {
		struct net *net = dev_net(dev);
		struct rtable *rt;

		spin_lock_bh(&rt_uncached_lock);
		list_for_each_entry(rt, &rt_uncached_list, rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = net->loopback_dev;
			dev_hold(rt->dst.dev);
			dev_put(dev);
		}
		spin_unlock_bh(&rt_uncached_lock);
	}
}

static bool rt_cache_valid(const struct rtable *rt)
{
	return	rt &&
		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
		!rt_is_expired(rt);
}

static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag)
{
	bool cached = false;

	if (fi) {
		struct fib_nh *nh = &FIB_RES_NH(*res);

		if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
			rt->rt_gateway = nh->nh_gw;
			rt->rt_uses_gateway = 1;
		}
		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
#ifdef CONFIG_IP_ROUTE_CLASSID
		rt->dst.tclassid = nh->nh_tclassid;
#endif
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr);
		else if (!(rt->dst.flags & DST_NOCACHE))
			cached = rt_cache_route(nh, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			rt->dst.flags |= DST_NOCACHE;
			if (!rt->rt_gateway)
				rt->rt_gateway = daddr;
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}

static struct rtable *rt_dst_alloc(struct net_device *dev,
				   bool nopolicy, bool noxfrm, bool will_cache)
{
	return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
			 (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
			 (nopolicy ? DST_NOPOLICY : 0) |
			 (noxfrm ? DST_NOXFRM : 0));
}
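
/* Flag composition in rt_dst_alloc(), spelled out: routes we intend to
 * cache carry none of these flags, so ipv4_dst_check() keeps them alive
 * until a generation bump or DST_OBSOLETE_KILL; one-off routes get
 * DST_HOST | DST_NOCACHE and die with their last reference. Every route
 * starts at DST_OBSOLETE_FORCE_CHK so dst_check() always funnels into
 * ipv4_dst_check().
 */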

/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	struct rtable *rth;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	u32 itag = 0;
	int err;

	/* Primary sanity checks. */

	if (in_dev == NULL)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(saddr))
			goto e_inval;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr))
			goto e_inval;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto e_err;
	}
	rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
	if (!rth)
		goto e_nobufs;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->dst.output = ip_rt_bug;

	rth->rt_genid	= rt_genid(dev_net(dev));
	rth->rt_flags	= RTCF_MULTICAST;
	rth->rt_type	= RTN_MULTICAST;
	rth->rt_is_input = 1;
	rth->rt_iif	= 0;
	rth->rt_pmtu	= 0;
	rth->rt_gateway	= 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);
	if (our) {
		rth->dst.input = ip_local_deliver;
		rth->rt_flags |= RTCF_LOCAL;
	}

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_set(skb, &rth->dst);
	return 0;

e_nobufs:
	return -ENOBUFS;
e_inval:
	return -EINVAL;
e_err:
	return err;
}


static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 * RFC1812 recommendation, if source is martian,
		 * the only hint is MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, true);
		}
	}
#endif
}

/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	unsigned int flags = 0;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
	if (out_dev == NULL) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
		IPCB(skb)->flags |= IPSKB_DOREDIRECT;

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * The proxy arp feature has been extended to allow ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	if (do_cache) {
		rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
	rth->rt_flags = flags;
	rth->rt_type = res->type;
	rth->rt_is_input = 1;
	rth->rt_iif	= 0;
	rth->rt_pmtu	= 0;
	rth->rt_gateway	= 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;
	rth->dst.output = ip_output;

	rt_set_nexthop(rth, daddr, res, NULL, res->fi, res->type, itag);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
cleanup:
	return err;
}

static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    const struct flowi4 *fl4,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && res->fi->fib_nhs > 1)
		fib_select_multipath(res);
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}

/*
 * NOTE. We drop all packets that have a local source
 * address, because every properly looped-back packet
 * must already have the correct destination attached by the output routine.
 *
 * This approach solves two big problems:
 * 1. Non-simplex devices are handled properly.
 * 2. IP spoofing attempts are filtered with a 100% guarantee.
 *
 * called with rcu_read_lock()
 */

static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev)
{
	struct fib_result res;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct flowi4	fl4;
	unsigned int	flags = 0;
	u32		itag = 0;
	struct rtable	*rth;
	int		err = -EINVAL;
	struct net	*net = dev_net(dev);
	bool do_cache;

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which cannot be detected
	 * by fib_lookup.
	 */

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	res.fi = NULL;
	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * it is not clear whether this should be fixed. Waiting for complaints :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(daddr))
		goto martian_destination;

	/* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
	 * and calls it at most once when daddr and/or saddr is a loopback address.
	 */
	if (ipv4_is_loopback(daddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_destination;
	} else if (ipv4_is_loopback(saddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_source;
	}

	/*
	 *	Now we are ready to route the packet.
	 */
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = dev->ifindex;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	err = fib_lookup(net, &fl4, &res);
	if (err != 0)
		goto no_route;

	if (res.type == RTN_BROADCAST)
		goto brd_input;

	if (res.type == RTN_LOCAL) {
		err = fib_validate_source(skb, saddr, daddr, tos,
					  LOOPBACK_IFINDEX,
					  dev, in_dev, &itag);
		if (err < 0)
			goto martian_source_keep_err;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev))
		goto no_route;
	if (res.type != RTN_UNICAST)
		goto martian_destination;

	err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (!ipv4_is_zeronet(saddr)) {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto martian_source_keep_err;
	}
	flags |= RTCF_BROADCAST;
	res.type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	do_cache = false;
	if (res.fi) {
		if (!itag) {
			rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
			if (rt_cache_valid(rth)) {
				skb_dst_set_noref(skb, &rth->dst);
				err = 0;
				goto out;
			}
			do_cache = true;
		}
	}

	rth = rt_dst_alloc(net->loopback_dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
	if (!rth)
		goto e_nobufs;

	rth->dst.input = ip_local_deliver;
	rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif

	rth->rt_genid = rt_genid(net);
	rth->rt_flags	= flags|RTCF_LOCAL;
	rth->rt_type	= res.type;
	rth->rt_is_input = 1;
	rth->rt_iif	= 0;
	rth->rt_pmtu	= 0;
	rth->rt_gateway	= 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);
	RT_CACHE_STAT_INC(in_slow_tot);
	if (res.type == RTN_UNREACHABLE) {
		rth->dst.input = ip_error;
		rth->dst.error = -err;
		rth->rt_flags	&= ~RTCF_LOCAL;
	}
	if (do_cache) {
		if (unlikely(!rt_cache_route(&FIB_RES_NH(res), rth))) {
			rth->dst.flags |= DST_NOCACHE;
			rt_add_uncached_list(rth);
		}
	}
	skb_dst_set(skb, &rth->dst);
	err = 0;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res.type = RTN_UNREACHABLE;
	if (err == -ESRCH)
		err = -ENETUNREACH;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	err = -EINVAL;
martian_source_keep_err:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}

int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			 u8 tos, struct net_device *dev)
{
	int res;

	tos &= IPTOS_RT_MASK;
	rcu_read_lock();

	/* Multicast recognition logic is moved from route cache to here.
	 * The problem was that too many Ethernet cards have broken/missing
	 * hardware multicast filters :-( As a result, a host on a multicast
	 * network acquires a lot of useless route cache entries, sort of
	 * SDR messages from all the world. Now we try to get rid of them.
	 * Really, provided the software IP multicast filter is organized
	 * reasonably (at least, hashed), it does not result in a slowdown
	 * compared with route cache reject entries.
	 * Note that multicast routers are not affected, because a
	 * route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			int our = ip_check_mc_rcu(in_dev, daddr, saddr,
						  ip_hdr(skb)->protocol);
			if (our
#ifdef CONFIG_IP_MROUTE
				||
			    (!ipv4_is_local_multicast(daddr) &&
			     IN_DEV_MFORWARD(in_dev))
#endif
			   ) {
				int res = ip_route_input_mc(skb, daddr, saddr,
							    tos, dev, our);
				rcu_read_unlock();
				return res;
			}
		}
		rcu_read_unlock();
		return -EINVAL;
	}
	res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(ip_route_input_noref);

/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4, int orig_oif,
				       struct net_device *dev_out,
				       unsigned int flags)
{
	struct fib_info *fi = res->fi;
	struct fib_nh_exception *fnhe;
	struct in_device *in_dev;
	u16 type = res->type;
	struct rtable *rth;
	bool do_cache;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return ERR_PTR(-EINVAL);

	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
			return ERR_PTR(-EINVAL);

	if (ipv4_is_lbcast(fl4->daddr))
		type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl4->daddr))
		type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl4->daddr))
		return ERR_PTR(-EINVAL);

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	do_cache = true;
	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		fi = NULL;
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
				     fl4->flowi4_proto))
			flags &= ~RTCF_LOCAL;
		else
			do_cache = false;
		/* If the multicast route does not exist, use the
		 * default one, but do not use a gateway in this case.
		 * Yes, it is a hack.
		 */
		if (fi && res->prefixlen < 4)
			fi = NULL;
	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
		   (orig_oif != dev_out->ifindex)) {
		/* For local routes that require a particular output interface
		 * we do not want to cache the result.  Caching the result
		 * causes incorrect behaviour when there are multiple source
		 * addresses on the interface, the end result being that if the
		 * intended recipient is waiting on that interface for the
		 * packet he won't receive it because it will be delivered on
		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
		 * be set to the loopback interface as well.
		 */
		fi = NULL;
	}

	fnhe = NULL;
	do_cache &= fi != NULL;
	if (do_cache) {
		struct rtable __rcu **prth;
		struct fib_nh *nh = &FIB_RES_NH(*res);

		fnhe = find_exception(nh, fl4->daddr);
		if (fnhe)
			prth = &fnhe->fnhe_rth;
		else {
			if (unlikely(fl4->flowi4_flags &
				     FLOWI_FLAG_KNOWN_NH &&
				     !(nh->nh_gw &&
				       nh->nh_scope == RT_SCOPE_LINK))) {
				do_cache = false;
				goto add;
			}
			prth = __this_cpu_ptr(nh->nh_pcpu_rth_output);
		}
		rth = rcu_dereference(*prth);
		if (rt_cache_valid(rth)) {
			dst_hold(&rth->dst);
			return rth;
		}
	}

add:
	rth = rt_dst_alloc(dev_out,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(in_dev, NOXFRM),
			   do_cache);
	if (!rth)
		return ERR_PTR(-ENOBUFS);

	rth->dst.output = ip_output;

	rth->rt_genid = rt_genid(dev_net(dev_out));
	rth->rt_flags	= flags;
	rth->rt_type	= type;
	rth->rt_is_input = 0;
	rth->rt_iif	= orig_oif ? : 0;
	rth->rt_pmtu	= 0;
	rth->rt_gateway = 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & RTCF_LOCAL)
		rth->dst.input = ip_local_deliver;
	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(fl4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);

	return rth;
}
1978
1979 /*
1980 * Major route resolver routine.
1981 */
1982
1983 struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
1984 {
1985 struct net_device *dev_out = NULL;
1986 __u8 tos = RT_FL_TOS(fl4);
1987 unsigned int flags = 0;
1988 struct fib_result res;
1989 struct rtable *rth;
1990 int orig_oif;
1991
1992 res.tclassid = 0;
1993 res.fi = NULL;
1994 res.table = NULL;
1995
1996 orig_oif = fl4->flowi4_oif;
1997
1998 fl4->flowi4_iif = LOOPBACK_IFINDEX;
1999 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2000 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2001 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2002
2003 rcu_read_lock();
2004 if (fl4->saddr) {
2005 rth = ERR_PTR(-EINVAL);
2006 if (ipv4_is_multicast(fl4->saddr) ||
2007 ipv4_is_lbcast(fl4->saddr) ||
2008 ipv4_is_zeronet(fl4->saddr))
2009 goto out;
2010
2011 /* I removed check for oif == dev_out->oif here.
2012 It was wrong for two reasons:
2013 1. ip_dev_find(net, saddr) can return wrong iface, if saddr
2014 is assigned to multiple interfaces.
2015 2. Moreover, we are allowed to send packets with saddr
2016 of another iface. --ANK
2017 */
2018
2019 if (fl4->flowi4_oif == 0 &&
2020 (ipv4_is_multicast(fl4->daddr) ||
2021 ipv4_is_lbcast(fl4->daddr))) {
2022 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2023 dev_out = __ip_dev_find(net, fl4->saddr, false);
2024 if (dev_out == NULL)
2025 goto out;
2026
			/* Special hack: the user can direct multicasts
			   and limited broadcast via the necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic, vat and friends to work.
			   They bind the socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of the routing cache they are
			   broken, because we are not allowed to build a
			   multicast path with a loopback source addr (look,
			   the routing cache cannot know that ttl is zero, so
			   the packet will not leave this host and the route
			   is valid). Luckily, this hack is a good workaround.
			 */

			fl4->flowi4_oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, fl4->saddr, false))
				goto out;
		}
	}

	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (dev_out == NULL)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr)) {
			if (!fl4->saddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl4->saddr) {
			if (ipv4_is_multicast(fl4->daddr))
				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
			else if (!fl4->daddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl4->daddr) {
		fl4->daddr = fl4->saddr;
		if (!fl4->daddr)
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = LOOPBACK_IFINDEX;
		res.type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	if (fib_lookup(net, fl4, &res)) {
		res.fi = NULL;
		res.table = NULL;
		if (fl4->flowi4_oif) {
			/* Apparently, routing tables are wrong. Assume
			   that the destination is on-link.

			   WHY? DW.
			   Because we are allowed to send to an iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch if the destination is gatewayed,
			   rather than direct. Moreover, if MSG_DONTROUTE
			   is set, we send the packet, ignoring both routing
			   tables and ifaddr state. --ANK

			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res.type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(-ENETUNREACH);
		goto out;
	}

	if (res.type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res.fi->fib_prefsrc)
				fl4->saddr = res.fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = dev_out->ifindex;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
		fib_select_multipath(&res);
	else
#endif
	if (!res.prefixlen &&
	    res.table->tb_num_default > 1 &&
	    res.type == RTN_UNICAST && !fl4->flowi4_oif)
		fib_select_default(&res);

	if (!fl4->saddr)
		fl4->saddr = FIB_RES_PREFSRC(net, res);

	dev_out = FIB_RES_DEV(res);
	fl4->flowi4_oif = dev_out->ifindex;

make_route:
	rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);

out:
	rcu_read_unlock();
	return rth;
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
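
/*
 * Illustrative sketch (not code from this file) of how a caller might use
 * __ip_route_output_key(); the field values are made up for the example:
 *
 *	struct flowi4 fl4 = {
 *		.daddr		= daddr,
 *		.saddr		= saddr,
 *		.flowi4_oif	= oif,
 *	};
 *	struct rtable *rt = __ip_route_output_key(net, &fl4);
 *
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	...
 *	ip_rt_put(rt);
 *
 * Note that fl4 may be modified on return (saddr/daddr/oif get filled in),
 * and the returned route holds a reference that must be released.
 */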

static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					  struct sk_buff *skb, u32 mtu)
{
}

static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				       struct sk_buff *skb)
{
}

static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			= AF_INET,
	.protocol		= cpu_to_be16(ETH_P_IP),
	.check			= ipv4_blackhole_dst_check,
	.mtu			= ipv4_blackhole_mtu,
	.default_advmss		= ipv4_default_advmss,
	.update_pmtu		= ipv4_rt_blackhole_update_pmtu,
	.redirect		= ipv4_rt_blackhole_redirect,
	.cow_metrics		= ipv4_rt_blackhole_cow_metrics,
	.neigh_lookup		= ipv4_neigh_lookup,
};

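/*
 * Clone an existing route into a "blackhole" copy: the new dst discards
 * everything sent through it (input/output are dst_discard), its ->check
 * op always reports it as stale, and cow_metrics refuses to write
 * metrics. Used where packets must be silently absorbed, e.g. while xfrm
 * state resolution is still pending.
 */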
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *ort = (struct rtable *) dst_orig;
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard;

		new->dev = ort->dst.dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->rt_is_input = ort->rt_is_input;
		rt->rt_iif = ort->rt_iif;
		rt->rt_pmtu = ort->rt_pmtu;

		rt->rt_genid = rt_genid(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_gateway = ort->rt_gateway;
		rt->rt_uses_gateway = ort->rt_uses_gateway;

		INIT_LIST_HEAD(&rt->rt_uncached);

		dst_free(new);
	}

	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}

struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
				    struct sock *sk)
{
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;

	if (flp4->flowi4_proto)
		rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
						   flowi4_to_flowi(flp4),
						   sk, 0);

	return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
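
/*
 * A rough usage sketch (illustrative, not code from this file): protocol
 * code typically fills in flowi4_proto so that the plain route lookup
 * above is also passed through xfrm_lookup() for IPsec policy:
 *
 *	struct flowi4 fl4 = {
 *		.daddr		= daddr,
 *		.flowi4_proto	= IPPROTO_UDP,
 *	};
 *	struct rtable *rt = ip_route_output_flow(net, &fl4, sk);
 *
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 */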
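/*
 * Fill one RTM_NEWROUTE message describing @rt into @skb: an rtmsg header
 * followed by RTA_* attributes (destination, source, oif, gateway,
 * metrics, mark, uid, cache info). Returns the message length on success,
 * or -EMSGSIZE when the skb runs out of room, in which case the partially
 * built message is cancelled.
 */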
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
			struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
			u32 seq, int event, int nowait, unsigned int flags)
{
	struct rtable *rt = skb_rtable(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned long expires = 0;
	u32 error;
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family = AF_INET;
	r->rtm_dst_len = 32;
	r->rtm_src_len = 0;
	r->rtm_tos = fl4->flowi4_tos;
	r->rtm_table = RT_TABLE_MAIN;
	if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
		goto nla_put_failure;
	r->rtm_type = rt->rt_type;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
		r->rtm_flags |= RTCF_DOREDIRECT;

	if (nla_put_be32(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (src) {
		r->rtm_src_len = 32;
		if (nla_put_be32(skb, RTA_SRC, src))
			goto nla_put_failure;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid &&
	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
		goto nla_put_failure;
#endif
	if (!rt_is_input_route(rt) &&
	    fl4->saddr != src) {
		if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
			goto nla_put_failure;
	}
	if (rt->rt_uses_gateway &&
	    nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
		goto nla_put_failure;

	expires = rt->dst.expires;
	if (expires) {
		unsigned long now = jiffies;

		if (time_before(now, expires))
			expires -= now;
		else
			expires = 0;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt_pmtu && expires)
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (fl4->flowi4_mark &&
	    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
		goto nla_put_failure;

	if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
	    nla_put_u32(skb, RTA_UID,
			from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
		goto nla_put_failure;

	error = rt->dst.error;

	if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb,
						 fl4->saddr, fl4->daddr,
						 r, nowait, portid);

			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
					error = err;
				}
			}
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
				goto nla_put_failure;
	}

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
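/*
 * RTM_GETROUTE handler (this is what e.g. "ip route get" talks to): build
 * a dummy skb, resolve the route either through the input path when
 * RTA_IIF is given (ip_route_input()) or through the output path
 * (ip_route_output_key()), then report the result back to the requester
 * via rt_fill_info() and rtnl_unicast().
 */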
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	struct flowi4 fl4;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	int mark;
	struct sk_buff *skb;
	kuid_t uid;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers; this skb can pass
	   through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
	if (tb[RTA_UID])
		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
	else
		uid = (iif ? INVALID_UID : current_uid());

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = uid;

	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol = htons(ETH_P_IP);
		skb->dev = dev;
		skb->mark = mark;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		rt = ip_route_output_key(net, &fl4);

		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
	}

	if (err)
		goto errout_free;

	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(net, dst, src, &fl4, skb,
			   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}

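/*
 * There is nothing left to dump here now that the old per-flow route
 * cache is gone; just report the current skb length so the netlink dump
 * terminates cleanly.
 */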
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	return skb->len;
}

void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}

#ifdef CONFIG_SYSCTL
static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_gc_elasticity __read_mostly = 8;

static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos)
{
	if (write) {
		rt_cache_flush((struct net *)__ctl->extra1);
		return 0;
	}

	return -EINVAL;
}

static ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },
};
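
/*
 * Writing any value to /proc/sys/net/ipv4/route/flush invalidates the
 * cached routes, for example:
 *
 *	echo 1 > /proc/sys/net/ipv4/route/flush
 *
 * The handler rejects reads with -EINVAL (the 0200 mode makes the file
 * write-only anyway).
 */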

static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (tbl == NULL)
			goto err_dup;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			tbl[0].procname = NULL;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (net->ipv4.route_hdr == NULL)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif

static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->rt_genid, 0);
	get_random_bytes(&net->ipv4.dev_addr_genid,
			 sizeof(net->ipv4.dev_addr_genid));
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};

static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init = ipv4_inetpeer_init,
	.exit = ipv4_inetpeer_exit,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */

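/*
 * Boot-time initialization: seed the IP ID array, allocate the dst slab
 * and per-cpu accounting, then bring up devinet, the FIB, the /proc
 * files, XFRM and the RTM_GETROUTE handler, and finally register the
 * per-netns sysctl, genid and inetpeer operations.
 */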
int __init ip_rt_init(void)
{
	int rc = 0;

	ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
	if (!ip_idents)
		panic("IP: failed to allocate ip_idents\n");

	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));

#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return rc;
}

#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif