xfrm: fix __xfrm_route_forward()
[GitHub/LineageOS/android_kernel_samsung_universal7580.git] / net / ipv4 / route.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#define RT_FL_TOS(oldflp4) \
	((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;
static int rt_chain_length_max __read_mostly	= 20;

/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_default_mtu(const struct dst_entry *dst);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
}

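/* Copy-on-write for route metrics: when a writable metrics array is needed,
 * clone the current (read-only) metrics into the destination's inet_peer and
 * swap the pointer in with cmpxchg(); returns NULL if no writable copy could
 * be installed.
 */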
static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer;
	u32 *p = NULL;

	if (!rt->peer)
		rt_bind_peer(rt, 1);

	peer = rt->peer;
	if (peer) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		p = peer->metrics;
		if (inet_metrics_new(peer))
			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else {
			if (rt->fi) {
				fib_info_put(rt->fi);
				rt->fi = NULL;
			}
		}
	}
	return p;
}

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.default_mtu =		ipv4_default_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.local_out =		__ip_local_out,
};

#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(FILLER),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};


/*
 * Route cache.
 */

/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    lock held.
 */

struct rt_hash_bucket {
	struct rtable __rcu	*chain;
};

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks.
 * The size of this table is a power of two and depends on the number of CPUs.
 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]

static __init void rt_hash_lock_init(void)
{
	int i;

	rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
			GFP_KERNEL);
	if (!rt_hash_locks)
		panic("IP: failed to allocate rt_hash_locks\n");

	for (i = 0; i < RT_HASH_LOCK_SZ; i++)
		spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
{
}
#endif

static struct rt_hash_bucket	*rt_hash_table __read_mostly;
static unsigned			rt_hash_mask __read_mostly;
static unsigned int		rt_hash_log  __read_mostly;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)

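/* Hash the (daddr, saddr, ifindex) lookup key into a route cache bucket;
 * the per-netns generation id is mixed in as the jhash initial value.
 */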
static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
				   int genid)
{
	return jhash_3words((__force u32)daddr, (__force u32)saddr,
			    idx, genid)
		& rt_hash_mask;
}

static inline int rt_genid(struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}

#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
	struct seq_net_private p;
	int bucket;
	int genid;
};

static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
	struct rt_cache_iter_state *st = seq->private;
	struct rtable *r = NULL;

	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
		if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
			continue;
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
		while (r) {
			if (dev_net(r->dst.dev) == seq_file_net(seq) &&
			    r->rt_genid == st->genid)
				return r;
			r = rcu_dereference_bh(r->dst.rt_next);
		}
		rcu_read_unlock_bh();
	}
	return r;
}

static struct rtable *__rt_cache_get_next(struct seq_file *seq,
					  struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;

	r = rcu_dereference_bh(r->dst.rt_next);
	while (!r) {
		rcu_read_unlock_bh();
		do {
			if (--st->bucket < 0)
				return NULL;
		} while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
	}
	return r;
}

static struct rtable *rt_cache_get_next(struct seq_file *seq,
					struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;
	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
		if (dev_net(r->dst.dev) != seq_file_net(seq))
			continue;
		if (r->rt_genid == st->genid)
			break;
	}
	return r;
}

static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
{
	struct rtable *r = rt_cache_get_first(seq);

	if (r)
		while (pos && (r = rt_cache_get_next(seq, r)))
			--pos;
	return pos ? NULL : r;
}

static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct rt_cache_iter_state *st = seq->private;
	if (*pos)
		return rt_cache_get_idx(seq, *pos - 1);
	st->genid = rt_genid(seq_file_net(seq));
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rtable *r;

	if (v == SEQ_START_TOKEN)
		r = rt_cache_get_first(seq);
	else
		r = rt_cache_get_next(seq, v);
	++*pos;
	return r;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		rcu_read_unlock_bh();
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	else {
		struct rtable *r = v;
		int len;

		seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
			   "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
			   r->dst.dev ? r->dst.dev->name : "*",
			   (__force u32)r->rt_dst,
			   (__force u32)r->rt_gateway,
			   r->rt_flags, atomic_read(&r->dst.__refcnt),
			   r->dst.__use, 0, (__force u32)r->rt_src,
			   dst_metric_advmss(&r->dst) + 40,
			   dst_metric(&r->dst, RTAX_WINDOW),
			   (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
				 dst_metric(&r->dst, RTAX_RTTVAR)),
			   r->rt_tos,
			   r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
			   r->dst.hh ? (r->dst.hh->hh_output ==
					dev_queue_xmit) : 0,
			   r->rt_spec_dst, &len);

		seq_printf(seq, "%*s\n", 127 - len, "");
	}
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &rt_cache_seq_ops,
			sizeof(struct rt_cache_iter_state));
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;

}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   st->in_hit,
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   st->out_hit,
		   st->out_slow_tot,
		   st->out_slow_mc,

		   st->gc_total,
		   st->gc_ignored,
		   st->gc_goal_miss,
		   st->gc_dst_overflow,
		   st->in_hlist_search,
		   st->out_hlist_search
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}

static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
	.owner	  = THIS_MODULE,
	.open	  = rt_acct_proc_open,
	.read	  = seq_read,
	.llseek	  = seq_lseek,
	.release  = single_release,
};
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
			&rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline void rt_free(struct rtable *rt)
{
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline void rt_drop(struct rtable *rt)
{
	ip_rt_put(rt);
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rt_is_input_route(rth) && rth->dst.rt_next;
}

static inline int rt_valuable(struct rtable *rth)
{
	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
		(rth->peer && rth->peer->pmtu_expires);
}

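/* Decide whether an unreferenced cache entry may be reclaimed: entries newer
 * than tmo1 are normally kept (colliding broadcast/multicast entries are the
 * exception), and "valuable" entries (redirects, learned PMTU) are kept up
 * to tmo2.
 */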
static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
	unsigned long age;
	int ret = 0;

	if (atomic_read(&rth->dst.__refcnt))
		goto out;

	age = jiffies - rth->dst.lastuse;
	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
	    (age <= tmo2 && rt_valuable(rth)))
		goto out;
	ret = 1;
out:	return ret;
}

/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
	u32 score = jiffies - rt->dst.lastuse;

	score = ~score & ~(3<<30);

	if (rt_valuable(rt))
		score |= (1<<31);

	if (rt_is_output_route(rt) ||
	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
		score |= (1<<30);

	return score;
}

static inline bool rt_caching(const struct net *net)
{
	return net->ipv4.current_rt_cache_rebuild_count <=
		net->ipv4.sysctl_rt_cache_rebuild_count;
}

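/* Key comparison helpers: compare_hash_inputs() matches only the fields that
 * feed rt_hash() (daddr, saddr, iif), while compare_keys() matches the full
 * lookup key including mark, tos and oif.
 */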
static inline bool compare_hash_inputs(const struct rtable *rt1,
				       const struct rtable *rt2)
{
	return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
		(rt1->rt_iif ^ rt2->rt_iif)) == 0);
}

static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
{
	return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
		((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
		(rt1->rt_mark ^ rt2->rt_mark) |
		(rt1->rt_tos ^ rt2->rt_tos) |
		(rt1->rt_oif ^ rt2->rt_oif) |
		(rt1->rt_iif ^ rt2->rt_iif)) == 0;
}

static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
	return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
}

static inline int rt_is_expired(struct rtable *rth)
{
	return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}

/*
 * Perform a full scan of hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to be rescheduled if necessary.
 */
static void rt_do_flush(struct net *net, int process_context)
{
	unsigned int i;
	struct rtable *rth, *next;

	for (i = 0; i <= rt_hash_mask; i++) {
		struct rtable __rcu **pprev;
		struct rtable *list;

		if (process_context && need_resched())
			cond_resched();
		rth = rcu_dereference_raw(rt_hash_table[i].chain);
		if (!rth)
			continue;

		spin_lock_bh(rt_hash_lock_addr(i));

		list = NULL;
		pprev = &rt_hash_table[i].chain;
		rth = rcu_dereference_protected(*pprev,
			lockdep_is_held(rt_hash_lock_addr(i)));

		while (rth) {
			next = rcu_dereference_protected(rth->dst.rt_next,
				lockdep_is_held(rt_hash_lock_addr(i)));

			if (!net ||
			    net_eq(dev_net(rth->dst.dev), net)) {
				rcu_assign_pointer(*pprev, next);
				rcu_assign_pointer(rth->dst.rt_next, list);
				list = rth;
			} else {
				pprev = &rth->dst.rt_next;
			}
			rth = next;
		}

		spin_unlock_bh(rt_hash_lock_addr(i));

		for (; list; list = next) {
			next = rcu_dereference_protected(list->dst.rt_next, 1);
			rt_free(list);
		}
	}
}

/*
 * While freeing expired entries, we compute average chain length
 * and standard deviation, using fixed-point arithmetic.
 * This to have an estimation of rt_chain_length_max
 *  rt_chain_length_max = max(elasticity, AVG + 4*SD)
 * We use 3 bits for fractional part, and 29 (or 61) for magnitude.
 */

#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)

/*
 * Given a hash chain and an item in this hash chain,
 * find if a previous entry has the same hash_inputs
 * (but differs on tos, mark or oif)
 * Returns 0 if an alias is found.
 * Returns ONE if rth has no alias before itself.
 */
static int has_noalias(const struct rtable *head, const struct rtable *rth)
{
	const struct rtable *aux = head;

	while (aux != rth) {
		if (compare_hash_inputs(aux, rth))
			return 0;
		aux = rcu_dereference_protected(aux->dst.rt_next, 1);
	}
	return ONE;
}

/*
 * Perturbation of rt_genid by a small quantity [1..256]
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without giving recent rt_genid.
 * Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
	unsigned char shuffle;

	get_random_bytes(&shuffle, sizeof(shuffle));
	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}

/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
	rt_cache_invalidate(net);
	if (delay >= 0)
		rt_do_flush(net, !in_softirq());
}

/* Flush previous cache invalidated entries from the cache */
void rt_cache_flush_batch(struct net *net)
{
	rt_do_flush(net, !in_softirq());
}

static void rt_emergency_hash_rebuild(struct net *net)
{
	if (net_ratelimit())
		printk(KERN_WARNING "Route hash chain too long!\n");
	rt_cache_invalidate(net);
}

/*
   Short description of GC goals.

   We want to build algorithm, which will keep routing cache
   at some equilibrium point, when number of aged off entries
   is kept approximately equal to newly generated ones.

   Current expiration strength is variable "expire".
   We try to adjust it dynamically, so that if networking
   is idle expires is large enough to keep enough of warm entries,
   and when load increases it reduces to limit cache size.
 */

static int rt_garbage_collect(struct dst_ops *ops)
{
	static unsigned long expire = RT_GC_TIMEOUT;
	static unsigned long last_gc;
	static int rover;
	static int equilibrium;
	struct rtable *rth;
	struct rtable __rcu **rthp;
	unsigned long now = jiffies;
	int goal;
	int entries = dst_entries_get_fast(&ipv4_dst_ops);

	/*
	 * Garbage collection is pretty expensive,
	 * do not make it too frequently.
	 */

	RT_CACHE_STAT_INC(gc_total);

	if (now - last_gc < ip_rt_gc_min_interval &&
	    entries < ip_rt_max_size) {
		RT_CACHE_STAT_INC(gc_ignored);
		goto out;
	}

	entries = dst_entries_get_slow(&ipv4_dst_ops);
	/* Calculate number of entries, which we want to expire now. */
	goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
	if (goal <= 0) {
		if (equilibrium < ipv4_dst_ops.gc_thresh)
			equilibrium = ipv4_dst_ops.gc_thresh;
		goal = entries - equilibrium;
		if (goal > 0) {
			equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
			goal = entries - equilibrium;
		}
	} else {
		/* We are in dangerous area. Try to reduce cache really
		 * aggressively.
		 */
		goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
		equilibrium = entries - goal;
	}

	if (now - last_gc >= ip_rt_gc_min_interval)
		last_gc = now;

	if (goal <= 0) {
		equilibrium += goal;
		goto work_done;
	}

	do {
		int i, k;

		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
			unsigned long tmo = expire;

			k = (k + 1) & rt_hash_mask;
			rthp = &rt_hash_table[k].chain;
			spin_lock_bh(rt_hash_lock_addr(k));
			while ((rth = rcu_dereference_protected(*rthp,
					lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
				if (!rt_is_expired(rth) &&
					!rt_may_expire(rth, tmo, expire)) {
					tmo >>= 1;
					rthp = &rth->dst.rt_next;
					continue;
				}
				*rthp = rth->dst.rt_next;
				rt_free(rth);
				goal--;
			}
			spin_unlock_bh(rt_hash_lock_addr(k));
			if (goal <= 0)
				break;
		}
		rover = k;

		if (goal <= 0)
			goto work_done;

		/* Goal is not achieved. We stop process if:

		   - if expire reduced to zero. Otherwise, expire is halved.
		   - if table is not full.
		   - if we are called from interrupt.
		   - jiffies check is just fallback/debug loop breaker.
		     We will not spin here for long time in any case.
		 */

		RT_CACHE_STAT_INC(gc_goal_miss);

		if (expire == 0)
			break;

		expire >>= 1;
#if RT_CACHE_DEBUG >= 2
		printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
				dst_entries_get_fast(&ipv4_dst_ops), goal, i);
#endif

		if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
			goto out;
	} while (!in_softirq() && time_before_eq(jiffies, now));

	if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
		goto out;
	if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
		goto out;
	if (net_ratelimit())
		printk(KERN_WARNING "dst cache overflow\n");
	RT_CACHE_STAT_INC(gc_dst_overflow);
	return 1;

work_done:
	expire += ip_rt_gc_min_interval;
	if (expire > ip_rt_gc_timeout ||
	    dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
	    dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
		expire = ip_rt_gc_timeout;
#if RT_CACHE_DEBUG >= 2
	printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
			dst_entries_get_fast(&ipv4_dst_ops), goal, rover);
#endif
out:	return 0;
}

/*
 * Returns number of entries in a hash chain that have different hash_inputs
 */
static int slow_chain_length(const struct rtable *head)
{
	int length = 0;
	const struct rtable *rth = head;

	while (rth) {
		length += has_noalias(head, rth);
		rth = rcu_dereference_protected(rth->dst.rt_next, 1);
	}
	return length >> FRACT_BITS;
}

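/* Insert a freshly created route into cache bucket 'hash'.  If an equivalent
 * entry already exists it is moved to the front and reused; otherwise the new
 * entry is ARP-bound if needed and chained at the head of the bucket.
 * Overlong chains trigger eviction of the best candidate or an emergency
 * hash rebuild.  Returns the entry to use or an ERR_PTR().
 */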
static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
				     struct sk_buff *skb, int ifindex)
{
	struct rtable	*rth, *cand;
	struct rtable __rcu **rthp, **candp;
	unsigned long	now;
	u32		min_score;
	int		chain_length;
	int attempts = !in_softirq();

restart:
	chain_length = 0;
	min_score = ~(u32)0;
	cand = NULL;
	candp = NULL;
	now = jiffies;

	if (!rt_caching(dev_net(rt->dst.dev))) {
		/*
		 * If we're not caching, just tell the caller we
		 * were successful and don't touch the route.  The
		 * caller holds the sole reference to the cache entry, and
		 * it will be released when the caller is done with it.
		 * If we drop it here, the callers have no way to resolve routes
		 * when we're not caching.  Instead, just point *rp at rt, so
		 * the caller gets a single use out of the route.
		 * Note that we do rt_free on this new route entry, so that
		 * once its refcount hits zero, we are still able to reap it
		 * (Thanks Alexey)
		 * Note: To avoid expensive rcu stuff for this uncached dst,
		 * we set DST_NOCACHE so that dst_release() can free dst without
		 * waiting a grace period.
		 */

		rt->dst.flags |= DST_NOCACHE;
		if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
			int err = arp_bind_neighbour(&rt->dst);
			if (err) {
				if (net_ratelimit())
					printk(KERN_WARNING
					    "Neighbour table failure & not caching routes.\n");
				ip_rt_put(rt);
				return ERR_PTR(err);
			}
		}

		goto skip_hashing;
	}

	rthp = &rt_hash_table[hash].chain;

	spin_lock_bh(rt_hash_lock_addr(hash));
	while ((rth = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
		if (rt_is_expired(rth)) {
			*rthp = rth->dst.rt_next;
			rt_free(rth);
			continue;
		}
		if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
			/* Put it first */
			*rthp = rth->dst.rt_next;
			/*
			 * Since lookup is lockfree, the deletion
			 * must be visible to another weakly ordered CPU before
			 * the insertion at the start of the hash chain.
			 */
			rcu_assign_pointer(rth->dst.rt_next,
					   rt_hash_table[hash].chain);
			/*
			 * Since lookup is lockfree, the update writes
			 * must be ordered for consistency on SMP.
			 */
			rcu_assign_pointer(rt_hash_table[hash].chain, rth);

			dst_use(&rth->dst, now);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			rt_drop(rt);
			if (skb)
				skb_dst_set(skb, &rth->dst);
			return rth;
		}

		if (!atomic_read(&rth->dst.__refcnt)) {
			u32 score = rt_score(rth);

			if (score <= min_score) {
				cand = rth;
				candp = rthp;
				min_score = score;
			}
		}

		chain_length++;

		rthp = &rth->dst.rt_next;
	}

	if (cand) {
		/* ip_rt_gc_elasticity used to be average length of chain
		 * length, when exceeded gc becomes really aggressive.
		 *
		 * The second limit is less certain. At the moment it allows
		 * only 2 entries per bucket. We will see.
		 */
		if (chain_length > ip_rt_gc_elasticity) {
			*candp = cand->dst.rt_next;
			rt_free(cand);
		}
	} else {
		if (chain_length > rt_chain_length_max &&
		    slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
			struct net *net = dev_net(rt->dst.dev);
			int num = ++net->ipv4.current_rt_cache_rebuild_count;
			if (!rt_caching(net)) {
				printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
					rt->dst.dev->name, num);
			}
			rt_emergency_hash_rebuild(net);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
					ifindex, rt_genid(net));
			goto restart;
		}
	}

	/* Try to bind route to arp only if it is output
	   route or unicast forwarding path.
	 */
	if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
		int err = arp_bind_neighbour(&rt->dst);
		if (err) {
			spin_unlock_bh(rt_hash_lock_addr(hash));

			if (err != -ENOBUFS) {
				rt_drop(rt);
				return ERR_PTR(err);
			}

			/* Neighbour tables are full and nothing
			   can be released. Try to shrink route cache,
			   it is most likely it holds some neighbour records.
			 */
			if (attempts-- > 0) {
				int saved_elasticity = ip_rt_gc_elasticity;
				int saved_int = ip_rt_gc_min_interval;
				ip_rt_gc_elasticity	= 1;
				ip_rt_gc_min_interval	= 0;
				rt_garbage_collect(&ipv4_dst_ops);
				ip_rt_gc_min_interval	= saved_int;
				ip_rt_gc_elasticity	= saved_elasticity;
				goto restart;
			}

			if (net_ratelimit())
				printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
			rt_drop(rt);
			return ERR_PTR(-ENOBUFS);
		}
	}

	rt->dst.rt_next = rt_hash_table[hash].chain;

#if RT_CACHE_DEBUG >= 2
	if (rt->dst.rt_next) {
		struct rtable *trt;
		printk(KERN_DEBUG "rt_cache @%02x: %pI4",
		       hash, &rt->rt_dst);
		for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
			printk(" . %pI4", &trt->rt_dst);
		printk("\n");
	}
#endif
	/*
	 * Since lookup is lockfree, we must make sure
	 * previous writes to rt are committed to memory
	 * before making rt visible to other CPUS.
	 */
	rcu_assign_pointer(rt_hash_table[hash].chain, rt);

	spin_unlock_bh(rt_hash_lock_addr(hash));

skip_hashing:
	if (skb)
		skb_dst_set(skb, &rt->dst);
	return rt;
}

static atomic_t __rt_peer_genid = ATOMIC_INIT(0);

static u32 rt_peer_genid(void)
{
	return atomic_read(&__rt_peer_genid);
}

void rt_bind_peer(struct rtable *rt, int create)
{
	struct inet_peer *peer;

	peer = inet_getpeer_v4(rt->rt_dst, create);

	if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
		inet_putpeer(peer);
	else
		rt->rt_peer_genid = rt_peer_genid();
}

/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we still can generate some output.
 * Random ID selection looks a bit dangerous because we have no chances to
 * select ID being unique in a reasonable period of time.
 * But broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt) {
		if (rt->peer == NULL)
			rt_bind_peer(rt, 1);

		/* If peer is attached to destination, it is never detached,
		   so that we need not to grab a lock to dereference it.
		 */
		if (rt->peer) {
			iph->id = htons(inet_getid(rt->peer, more));
			return;
		}
	} else
		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
		       __builtin_return_address(0));

	ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);

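/* Unlink one entry from its hash chain (freeing any expired entries found on
 * the way) and drop the reference held by the caller.
 */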
static void rt_del(unsigned hash, struct rtable *rt)
{
	struct rtable __rcu **rthp;
	struct rtable *aux;

	rthp = &rt_hash_table[hash].chain;
	spin_lock_bh(rt_hash_lock_addr(hash));
	ip_rt_put(rt);
	while ((aux = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
		if (aux == rt || rt_is_expired(aux)) {
			*rthp = aux->dst.rt_next;
			rt_free(aux);
			continue;
		}
		rthp = &aux->dst.rt_next;
	}
	spin_unlock_bh(rt_hash_lock_addr(hash));
}

/* called in rcu_read_lock() section */
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
		    __be32 saddr, struct net_device *dev)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct inet_peer *peer;
	struct net *net;

	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	peer = inet_getpeer_v4(daddr, 1);
	if (peer) {
		peer->redirect_learned.a4 = new_gw;

		inet_putpeer(peer);

		atomic_inc(&__rt_peer_genid);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
			"  Advised path = %pI4 -> %pI4\n",
		       &old_gw, dev->name, &new_gw,
		       &saddr, &daddr);
#endif
	;
}

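/* dst_ops->negative_advice: drop a cached route that is obsolete or was
 * learned from a redirect; if only the learned PMTU has expired, restore the
 * original MTU metric instead.
 */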
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if (rt->rt_flags & RTCF_REDIRECTED) {
			unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
						rt->rt_oif,
						rt_genid(dev_net(dst->dev)));
#if RT_CACHE_DEBUG >= 1
			printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
				&rt->rt_dst, rt->rt_tos);
#endif
			rt_del(hash, rt);
			ret = NULL;
		} else if (rt->peer &&
			   rt->peer->pmtu_expires &&
			   time_after_eq(jiffies, rt->peer->pmtu_expires)) {
			unsigned long orig = rt->peer->pmtu_expires;

			if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
				dst_metric_set(dst, RTAX_MTU,
					       rt->peer->pmtu_orig);
		}
	}
	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */

void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	int log_martians;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	rcu_read_unlock();

	if (!rt->peer)
		rt_bind_peer(rt, 1);
	peer = rt->peer;
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
		peer->rate_tokens = 0;

	/* Too many ignored redirects; do not send anything
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->rate_tokens >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		return;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number &&
		    net_ratelimit())
			printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
			       &rt->rt_src, rt->rt_iif,
			       &rt->rt_dst, &rt->rt_gateway);
#endif
	}
}

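/* Input-path handler for routes carrying a dst->error: translate the stored
 * errno into an ICMP destination-unreachable code, send it (rate-limited via
 * the inet_peer token bucket) and free the packet.
 */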
static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct inet_peer *peer;
	unsigned long now;
	bool send;
	int code;

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		IP_INC_STATS_BH(dev_net(rt->dst.dev),
				IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	if (!rt->peer)
		rt_bind_peer(rt, 1);
	peer = rt->peer;

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}

/*
 *	The last two values are not from the RFC but
 *	are needed for AMPRnet AX.25 paths.
 */

static const unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static inline unsigned short guess_mtu(unsigned short old_mtu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;
}

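/* Handle an incoming ICMP "fragmentation needed": sanitise the advertised MTU
 * (falling back to the plateau table for broken senders), record it on the
 * destination's inet_peer with an expiry, and return the MTU to use.
 */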
unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
				 unsigned short new_mtu,
				 struct net_device *dev)
{
	unsigned short old_mtu = ntohs(iph->tot_len);
	unsigned short est_mtu = 0;
	struct inet_peer *peer;

	peer = inet_getpeer_v4(iph->daddr, 1);
	if (peer) {
		unsigned short mtu = new_mtu;

		if (new_mtu < 68 || new_mtu >= old_mtu) {
			/* BSD 4.2 derived systems incorrectly adjust
			 * tot_len by the IP header length, and report
			 * a zero MTU in the ICMP message.
			 */
			if (mtu == 0 &&
			    old_mtu >= 68 + (iph->ihl << 2))
				old_mtu -= iph->ihl << 2;
			mtu = guess_mtu(old_mtu);
		}

		if (mtu < ip_rt_min_pmtu)
			mtu = ip_rt_min_pmtu;
		if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
			unsigned long pmtu_expires;

			pmtu_expires = jiffies + ip_rt_mtu_expires;
			if (!pmtu_expires)
				pmtu_expires = 1UL;

			est_mtu = mtu;
			peer->pmtu_learned = mtu;
			peer->pmtu_expires = pmtu_expires;
		}

		inet_putpeer(peer);

		atomic_inc(&__rt_peer_genid);
	}
	return est_mtu ? : new_mtu;
}

static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
{
	unsigned long expires = peer->pmtu_expires;

	if (time_before(jiffies, expires)) {
		u32 orig_dst_mtu = dst_mtu(dst);
		if (peer->pmtu_learned < orig_dst_mtu) {
			if (!peer->pmtu_orig)
				peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
			dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
		}
	} else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
		dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
}

static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer;

	dst_confirm(dst);

	if (!rt->peer)
		rt_bind_peer(rt, 1);
	peer = rt->peer;
	if (peer) {
		if (mtu < ip_rt_min_pmtu)
			mtu = ip_rt_min_pmtu;
		if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
			unsigned long pmtu_expires;

			pmtu_expires = jiffies + ip_rt_mtu_expires;
			if (!pmtu_expires)
				pmtu_expires = 1UL;

			peer->pmtu_learned = mtu;
			peer->pmtu_expires = pmtu_expires;

			atomic_inc(&__rt_peer_genid);
			rt->rt_peer_genid = rt_peer_genid();
		}
		check_peer_pmtu(dst, peer);

		inet_putpeer(peer);
	}
}

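/* Switch a cached route to the gateway learned from a redirect: rebind the
 * neighbour to the new gateway, falling back to the original gateway (and
 * returning -EAGAIN) if no valid neighbour can be obtained.
 */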
static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
{
	struct rtable *rt = (struct rtable *) dst;
	__be32 orig_gw = rt->rt_gateway;

	dst_confirm(&rt->dst);

	neigh_release(rt->dst.neighbour);
	rt->dst.neighbour = NULL;

	rt->rt_gateway = peer->redirect_learned.a4;
	if (arp_bind_neighbour(&rt->dst) ||
	    !(rt->dst.neighbour->nud_state & NUD_VALID)) {
		if (rt->dst.neighbour)
			neigh_event_send(rt->dst.neighbour, NULL);
		rt->rt_gateway = orig_gw;
		return -EAGAIN;
	} else {
		rt->rt_flags |= RTCF_REDIRECTED;
		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE,
					rt->dst.neighbour);
	}
	return 0;
}

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt_is_expired(rt))
		return NULL;
	if (rt->rt_peer_genid != rt_peer_genid()) {
		struct inet_peer *peer;

		if (!rt->peer)
			rt_bind_peer(rt, 0);

		peer = rt->peer;
		if (peer && peer->pmtu_expires)
			check_peer_pmtu(dst, peer);

		if (peer && peer->redirect_learned.a4 &&
		    peer->redirect_learned.a4 != rt->rt_gateway) {
			if (check_peer_redir(dst, peer))
				return NULL;
		}

		rt->rt_peer_genid = rt_peer_genid();
	}
	return dst;
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer = rt->peer;

	if (rt->fi) {
		fib_info_put(rt->fi);
		rt->fi = NULL;
	}
	if (peer) {
		rt->peer = NULL;
		inet_putpeer(peer);
	}
}


static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = skb_rtable(skb);
	if (rt &&
	    rt->peer &&
	    rt->peer->pmtu_expires) {
		unsigned long orig = rt->peer->pmtu_expires;

		if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
			dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
	}
}

static int ip_rt_bug(struct sk_buff *skb)
{
	printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
		&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	return 0;
}

/*
   We do not cache source address of outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so that it is out of the fast path.

   BTW remember: "addr" is allowed to be not aligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct rtable *rt)
{
	__be32 src;
	struct fib_result res;

	if (rt_is_output_route(rt))
		src = rt->rt_src;
	else {
		struct flowi4 fl4 = {
			.daddr = rt->rt_key_dst,
			.saddr = rt->rt_key_src,
			.flowi4_tos = rt->rt_tos,
			.flowi4_oif = rt->rt_oif,
			.flowi4_iif = rt->rt_iif,
			.flowi4_mark = rt->rt_mark,
		};

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
			src = FIB_RES_PREFSRC(res);
		else
			src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
					RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (advmss == 0) {
		advmss = max_t(unsigned int, dst->dev->mtu - 40,
			       ip_rt_min_advmss);
		if (advmss > 65535 - 40)
			advmss = 65535 - 40;
	}
	return advmss;
}

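/* Default path MTU for a route with no explicit MTU metric: the device MTU,
 * clamped to 576 for locked-MTU routes via a gateway and to IP_MAX_MTU.
 */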
static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst->dev->mtu;

	if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
		const struct rtable *rt = (const struct rtable *) dst;

		if (rt->rt_gateway != rt->rt_dst && mtu > 576)
			mtu = 576;
	}

	if (mtu > IP_MAX_MTU)
		mtu = IP_MAX_MTU;

	return mtu;
}

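/* Attach metrics to a new route: prefer the per-destination inet_peer copy
 * (cloning the fib_info metrics into it on first use and picking up any
 * learned PMTU or redirect), otherwise point at the fib_info metrics
 * read-only.
 */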
68a5e3dd 1772static void rt_init_metrics(struct rtable *rt, const struct flowi4 *oldflp4,
5e2b61f7 1773 struct fib_info *fi)
a4daad6b 1774{
0131ba45
DM
1775 struct inet_peer *peer;
1776 int create = 0;
a4daad6b 1777
0131ba45
DM
1778 /* If a peer entry exists for this destination, we must hook
1779 * it up in order to get at cached metrics.
1780 */
68a5e3dd 1781 if (oldflp4 && (oldflp4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
0131ba45
DM
1782 create = 1;
1783
3c0afdca 1784 rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
0131ba45 1785 if (peer) {
3c0afdca 1786 rt->rt_peer_genid = rt_peer_genid();
a4daad6b
DM
1787 if (inet_metrics_new(peer))
1788 memcpy(peer->metrics, fi->fib_metrics,
1789 sizeof(u32) * RTAX_MAX);
1790 dst_init_metrics(&rt->dst, peer->metrics, false);
2c8cec5c
DM
1791
1792 if (peer->pmtu_expires)
1793 check_peer_pmtu(&rt->dst, peer);
f39925db
DM
1794 if (peer->redirect_learned.a4 &&
1795 peer->redirect_learned.a4 != rt->rt_gateway) {
1796 rt->rt_gateway = peer->redirect_learned.a4;
1797 rt->rt_flags |= RTCF_REDIRECTED;
1798 }
0131ba45
DM
1799 } else {
1800 if (fi->fib_metrics != (u32 *) dst_default_metrics) {
1801 rt->fi = fi;
1802 atomic_inc(&fi->fib_clntref);
1803 }
1804 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
a4daad6b
DM
1805 }
1806}
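/*
 * Summary of the two branches above: when the caller pre-COWs metrics
 * (FLOWI_FLAG_PRECOW_METRICS) or an inet_peer entry already exists, the dst
 * metrics point at the writable per-destination peer->metrics array, seeded
 * from the fib_info on first use; otherwise the route simply borrows the
 * read-only fib_info metrics and pins the fib_info via fib_clntref.
 */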
1807
68a5e3dd 1808static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *oldflp4,
5e2b61f7 1809 const struct fib_result *res,
982721f3 1810 struct fib_info *fi, u16 type, u32 itag)
1da177e4 1811{
defb3519 1812 struct dst_entry *dst = &rt->dst;
1da177e4
LT
1813
1814 if (fi) {
1815 if (FIB_RES_GW(*res) &&
1816 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1817 rt->rt_gateway = FIB_RES_GW(*res);
68a5e3dd 1818 rt_init_metrics(rt, oldflp4, fi);
c7066f70 1819#ifdef CONFIG_IP_ROUTE_CLASSID
defb3519 1820 dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1da177e4 1821#endif
d33e4553 1822 }
defb3519 1823
defb3519
DM
1824 if (dst_mtu(dst) > IP_MAX_MTU)
1825 dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
0dbaee3b 1826 if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
defb3519 1827 dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
1da177e4 1828
c7066f70 1829#ifdef CONFIG_IP_ROUTE_CLASSID
1da177e4
LT
1830#ifdef CONFIG_IP_MULTIPLE_TABLES
1831 set_class_tag(rt, fib_rules_tclass(res));
1832#endif
1833 set_class_tag(rt, itag);
1834#endif
982721f3 1835 rt->rt_type = type;
1da177e4
LT
1836}
1837
0c4dcd58
DM
1838static struct rtable *rt_dst_alloc(bool nopolicy, bool noxfrm)
1839{
3c7bd1a1 1840 struct rtable *rt = dst_alloc(&ipv4_dst_ops, 1);
0c4dcd58
DM
1841 if (rt) {
1842 rt->dst.obsolete = -1;
1843
0c4dcd58
DM
1844 rt->dst.flags = DST_HOST |
1845 (nopolicy ? DST_NOPOLICY : 0) |
1846 (noxfrm ? DST_NOXFRM : 0);
1847 }
1848 return rt;
1849}
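/*
 * The two flags matter to IPsec: DST_NOPOLICY lets xfrm_policy_check() skip
 * the inbound policy lookup for traffic using this route, and DST_NOXFRM lets
 * xfrm_route_forward() bypass __xfrm_route_forward() on the forwarding path,
 * so no transformation bundle is looked up (see include/net/xfrm.h).  Both
 * are derived at the call sites from the per-device NOPOLICY/NOXFRM settings
 * (the disable_policy / disable_xfrm sysctls) via IN_DEV_CONF_GET().
 */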
1850
96d36220 1851/* called in rcu_read_lock() section */
9e12bb22 1852static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1da177e4
LT
1853 u8 tos, struct net_device *dev, int our)
1854{
96d36220 1855 unsigned int hash;
1da177e4 1856 struct rtable *rth;
a61ced5d 1857 __be32 spec_dst;
96d36220 1858 struct in_device *in_dev = __in_dev_get_rcu(dev);
1da177e4 1859 u32 itag = 0;
b5f7e755 1860 int err;
1da177e4
LT
1861
1862 /* Primary sanity checks. */
1863
1864 if (in_dev == NULL)
1865 return -EINVAL;
1866
1e637c74 1867 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
f97c1e0c 1868 ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
1da177e4
LT
1869 goto e_inval;
1870
f97c1e0c
JP
1871 if (ipv4_is_zeronet(saddr)) {
1872 if (!ipv4_is_local_multicast(daddr))
1da177e4
LT
1873 goto e_inval;
1874 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
b5f7e755
ED
1875 } else {
1876 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
1877 &itag, 0);
1878 if (err < 0)
1879 goto e_err;
1880 }
0c4dcd58 1881 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1da177e4
LT
1882 if (!rth)
1883 goto e_nobufs;
1884
d8d1f30b 1885 rth->dst.output = ip_rt_bug;
1da177e4 1886
5e2b61f7 1887 rth->rt_key_dst = daddr;
1da177e4 1888 rth->rt_dst = daddr;
5e2b61f7
DM
1889 rth->rt_tos = tos;
1890 rth->rt_mark = skb->mark;
1891 rth->rt_key_src = saddr;
1da177e4 1892 rth->rt_src = saddr;
c7066f70 1893#ifdef CONFIG_IP_ROUTE_CLASSID
d8d1f30b 1894 rth->dst.tclassid = itag;
1da177e4 1895#endif
5e2b61f7 1896 rth->rt_iif = dev->ifindex;
d8d1f30b
CG
1897 rth->dst.dev = init_net.loopback_dev;
1898 dev_hold(rth->dst.dev);
5e2b61f7 1899 rth->rt_oif = 0;
1da177e4
LT
1900 rth->rt_gateway = daddr;
1901 rth->rt_spec_dst= spec_dst;
e84f84f2 1902 rth->rt_genid = rt_genid(dev_net(dev));
1da177e4 1903 rth->rt_flags = RTCF_MULTICAST;
29e75252 1904 rth->rt_type = RTN_MULTICAST;
1da177e4 1905 if (our) {
d8d1f30b 1906 rth->dst.input= ip_local_deliver;
1da177e4
LT
1907 rth->rt_flags |= RTCF_LOCAL;
1908 }
1909
1910#ifdef CONFIG_IP_MROUTE
f97c1e0c 1911 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
d8d1f30b 1912 rth->dst.input = ip_mr_input;
1da177e4
LT
1913#endif
1914 RT_CACHE_STAT_INC(in_slow_mc);
1915
e84f84f2 1916 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
b23dd4fe
DM
1917 rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
1918 err = 0;
1919 if (IS_ERR(rth))
1920 err = PTR_ERR(rth);
1da177e4
LT
1921 return err;
1922e_nobufs:
1da177e4 1923 return -ENOBUFS;
1da177e4 1924e_inval:
96d36220 1925 return -EINVAL;
b5f7e755 1926e_err:
b5f7e755 1927 return err;
1da177e4
LT
1928}
1929
1930
1931static void ip_handle_martian_source(struct net_device *dev,
1932 struct in_device *in_dev,
1933 struct sk_buff *skb,
9e12bb22
AV
1934 __be32 daddr,
1935 __be32 saddr)
1da177e4
LT
1936{
1937 RT_CACHE_STAT_INC(in_martian_src);
1938#ifdef CONFIG_IP_ROUTE_VERBOSE
1939 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1940 /*
1941 * RFC1812 recommendation, if source is martian,
1942 * the only hint is MAC header.
1943 */
673d57e7
HH
1944 printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
1945 &daddr, &saddr, dev->name);
98e399f8 1946 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1da177e4 1947 int i;
98e399f8 1948 const unsigned char *p = skb_mac_header(skb);
1da177e4
LT
1949 printk(KERN_WARNING "ll header: ");
1950 for (i = 0; i < dev->hard_header_len; i++, p++) {
1951 printk("%02x", *p);
1952 if (i < (dev->hard_header_len - 1))
1953 printk(":");
1954 }
1955 printk("\n");
1956 }
1957 }
1958#endif
1959}
1960
47360228 1961/* called in rcu_read_lock() section */
5969f71d 1962static int __mkroute_input(struct sk_buff *skb,
982721f3 1963 const struct fib_result *res,
5969f71d
SH
1964 struct in_device *in_dev,
1965 __be32 daddr, __be32 saddr, u32 tos,
1966 struct rtable **result)
1da177e4 1967{
1da177e4
LT
1968 struct rtable *rth;
1969 int err;
1970 struct in_device *out_dev;
47360228 1971 unsigned int flags = 0;
d9c9df8c
AV
1972 __be32 spec_dst;
1973 u32 itag;
1da177e4
LT
1974
1975 /* get a working reference to the output device */
47360228 1976 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1da177e4
LT
1977 if (out_dev == NULL) {
1978 if (net_ratelimit())
1979 printk(KERN_CRIT "Bug in ip_route_input_slow(). "
1980 "Please, report\n");
1981 return -EINVAL;
1982 }
1983
1984
e905a9ed 1985 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
b0c110ca 1986 in_dev->dev, &spec_dst, &itag, skb->mark);
1da177e4 1987 if (err < 0) {
e905a9ed 1988 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1da177e4 1989 saddr);
e905a9ed 1990
1da177e4
LT
1991 goto cleanup;
1992 }
1993
1994 if (err)
1995 flags |= RTCF_DIRECTSRC;
1996
51b77cae 1997 if (out_dev == in_dev && err &&
1da177e4
LT
1998 (IN_DEV_SHARED_MEDIA(out_dev) ||
1999 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
2000 flags |= RTCF_DOREDIRECT;
2001
2002 if (skb->protocol != htons(ETH_P_IP)) {
2003 /* Not IP (e.g. ARP). Do not create a route if it is
2004 * invalid for proxy ARP. DNAT routes are always valid.
65324144
JDB
2005 *
2006 * The proxy ARP feature has been extended to allow ARP
2007 * replies back on the same interface, to support
2008 * Private VLAN switch technologies. See arp.c.
1da177e4 2009 */
65324144
JDB
2010 if (out_dev == in_dev &&
2011 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1da177e4
LT
2012 err = -EINVAL;
2013 goto cleanup;
2014 }
2015 }
2016
0c4dcd58
DM
2017 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
2018 IN_DEV_CONF_GET(out_dev, NOXFRM));
1da177e4
LT
2019 if (!rth) {
2020 err = -ENOBUFS;
2021 goto cleanup;
2022 }
2023
5e2b61f7 2024 rth->rt_key_dst = daddr;
1da177e4 2025 rth->rt_dst = daddr;
5e2b61f7
DM
2026 rth->rt_tos = tos;
2027 rth->rt_mark = skb->mark;
2028 rth->rt_key_src = saddr;
1da177e4
LT
2029 rth->rt_src = saddr;
2030 rth->rt_gateway = daddr;
5e2b61f7 2031 rth->rt_iif = in_dev->dev->ifindex;
d8d1f30b
CG
2032 rth->dst.dev = (out_dev)->dev;
2033 dev_hold(rth->dst.dev);
5e2b61f7 2034 rth->rt_oif = 0;
1da177e4
LT
2035 rth->rt_spec_dst= spec_dst;
2036
d8d1f30b
CG
2037 rth->dst.input = ip_forward;
2038 rth->dst.output = ip_output;
2039 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
1da177e4 2040
5e2b61f7 2041 rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
1da177e4
LT
2042
2043 rth->rt_flags = flags;
2044
2045 *result = rth;
2046 err = 0;
2047 cleanup:
1da177e4 2048 return err;
e905a9ed 2049}
1da177e4 2050
5969f71d
SH
2051static int ip_mkroute_input(struct sk_buff *skb,
2052 struct fib_result *res,
68a5e3dd 2053 const struct flowi4 *fl4,
5969f71d
SH
2054 struct in_device *in_dev,
2055 __be32 daddr, __be32 saddr, u32 tos)
1da177e4 2056{
7abaa27c 2057 struct rtable* rth = NULL;
1da177e4
LT
2058 int err;
2059 unsigned hash;
2060
2061#ifdef CONFIG_IP_ROUTE_MULTIPATH
ff3fccb3 2062 if (res->fi && res->fi->fib_nhs > 1)
1b7fe593 2063 fib_select_multipath(res);
1da177e4
LT
2064#endif
2065
2066 /* create a routing cache entry */
2067 err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2068 if (err)
2069 return err;
1da177e4
LT
2070
2071 /* put it into the cache */
68a5e3dd 2072 hash = rt_hash(daddr, saddr, fl4->flowi4_iif,
d8d1f30b 2073 rt_genid(dev_net(rth->dst.dev)));
68a5e3dd 2074 rth = rt_intern_hash(hash, rth, skb, fl4->flowi4_iif);
b23dd4fe
DM
2075 if (IS_ERR(rth))
2076 return PTR_ERR(rth);
2077 return 0;
1da177e4
LT
2078}
2079
1da177e4
LT
2080/*
2081 * NOTE. We drop all packets that have local source
2082 * addresses, because every properly looped-back packet
2083 * must already have the correct destination attached by the output routine.
2084 *
2085 * Such an approach solves two big problems:
2086 * 1. Non-simplex devices are handled properly.
2087 * 2. IP spoofing attempts are filtered with a 100% guarantee.
ebc0ffae 2088 * called with rcu_read_lock()
1da177e4
LT
2089 */
2090
9e12bb22 2091static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1da177e4
LT
2092 u8 tos, struct net_device *dev)
2093{
2094 struct fib_result res;
96d36220 2095 struct in_device *in_dev = __in_dev_get_rcu(dev);
68a5e3dd 2096 struct flowi4 fl4;
1da177e4
LT
2097 unsigned flags = 0;
2098 u32 itag = 0;
2099 struct rtable * rth;
2100 unsigned hash;
9e12bb22 2101 __be32 spec_dst;
1da177e4 2102 int err = -EINVAL;
c346dca1 2103 struct net * net = dev_net(dev);
1da177e4
LT
2104
2105 /* IP on this device is disabled. */
2106
2107 if (!in_dev)
2108 goto out;
2109
2110 /* Check for the most weird martians, which may not be detected
2111 by fib_lookup.
2112 */
2113
1e637c74 2114 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
f97c1e0c 2115 ipv4_is_loopback(saddr))
1da177e4
LT
2116 goto martian_source;
2117
27a954bd 2118 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1da177e4
LT
2119 goto brd_input;
2120
2121 /* Accept zero addresses only to limited broadcast;
2122 * I do not even know whether to fix it or not. Waiting for complaints :-)
2123 */
f97c1e0c 2124 if (ipv4_is_zeronet(saddr))
1da177e4
LT
2125 goto martian_source;
2126
27a954bd 2127 if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
1da177e4
LT
2128 goto martian_destination;
2129
2130 /*
2131 * Now we are ready to route packet.
2132 */
68a5e3dd
DM
2133 fl4.flowi4_oif = 0;
2134 fl4.flowi4_iif = dev->ifindex;
2135 fl4.flowi4_mark = skb->mark;
2136 fl4.flowi4_tos = tos;
2137 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2138 fl4.daddr = daddr;
2139 fl4.saddr = saddr;
2140 err = fib_lookup(net, &fl4, &res);
ebc0ffae 2141 if (err != 0) {
1da177e4 2142 if (!IN_DEV_FORWARD(in_dev))
2c2910a4 2143 goto e_hostunreach;
1da177e4
LT
2144 goto no_route;
2145 }
1da177e4
LT
2146
2147 RT_CACHE_STAT_INC(in_slow_tot);
2148
2149 if (res.type == RTN_BROADCAST)
2150 goto brd_input;
2151
2152 if (res.type == RTN_LOCAL) {
b5f7e755 2153 err = fib_validate_source(saddr, daddr, tos,
ebc0ffae
ED
2154 net->loopback_dev->ifindex,
2155 dev, &spec_dst, &itag, skb->mark);
b5f7e755
ED
2156 if (err < 0)
2157 goto martian_source_keep_err;
2158 if (err)
1da177e4
LT
2159 flags |= RTCF_DIRECTSRC;
2160 spec_dst = daddr;
2161 goto local_input;
2162 }
2163
2164 if (!IN_DEV_FORWARD(in_dev))
2c2910a4 2165 goto e_hostunreach;
1da177e4
LT
2166 if (res.type != RTN_UNICAST)
2167 goto martian_destination;
2168
68a5e3dd 2169 err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
1da177e4
LT
2170out: return err;
2171
2172brd_input:
2173 if (skb->protocol != htons(ETH_P_IP))
2174 goto e_inval;
2175
f97c1e0c 2176 if (ipv4_is_zeronet(saddr))
1da177e4
LT
2177 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2178 else {
2179 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
b0c110ca 2180 &itag, skb->mark);
1da177e4 2181 if (err < 0)
b5f7e755 2182 goto martian_source_keep_err;
1da177e4
LT
2183 if (err)
2184 flags |= RTCF_DIRECTSRC;
2185 }
2186 flags |= RTCF_BROADCAST;
2187 res.type = RTN_BROADCAST;
2188 RT_CACHE_STAT_INC(in_brd);
2189
2190local_input:
0c4dcd58 2191 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1da177e4
LT
2192 if (!rth)
2193 goto e_nobufs;
2194
d8d1f30b 2195 rth->dst.output= ip_rt_bug;
e84f84f2 2196 rth->rt_genid = rt_genid(net);
1da177e4 2197
5e2b61f7 2198 rth->rt_key_dst = daddr;
1da177e4 2199 rth->rt_dst = daddr;
5e2b61f7
DM
2200 rth->rt_tos = tos;
2201 rth->rt_mark = skb->mark;
2202 rth->rt_key_src = saddr;
1da177e4 2203 rth->rt_src = saddr;
c7066f70 2204#ifdef CONFIG_IP_ROUTE_CLASSID
d8d1f30b 2205 rth->dst.tclassid = itag;
1da177e4 2206#endif
5e2b61f7 2207 rth->rt_iif = dev->ifindex;
d8d1f30b
CG
2208 rth->dst.dev = net->loopback_dev;
2209 dev_hold(rth->dst.dev);
1da177e4
LT
2210 rth->rt_gateway = daddr;
2211 rth->rt_spec_dst= spec_dst;
d8d1f30b 2212 rth->dst.input= ip_local_deliver;
1da177e4
LT
2213 rth->rt_flags = flags|RTCF_LOCAL;
2214 if (res.type == RTN_UNREACHABLE) {
d8d1f30b
CG
2215 rth->dst.input= ip_error;
2216 rth->dst.error= -err;
1da177e4
LT
2217 rth->rt_flags &= ~RTCF_LOCAL;
2218 }
2219 rth->rt_type = res.type;
68a5e3dd
DM
2220 hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
2221 rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
b23dd4fe
DM
2222 err = 0;
2223 if (IS_ERR(rth))
2224 err = PTR_ERR(rth);
ebc0ffae 2225 goto out;
1da177e4
LT
2226
2227no_route:
2228 RT_CACHE_STAT_INC(in_no_route);
2229 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2230 res.type = RTN_UNREACHABLE;
7f53878d
MC
2231 if (err == -ESRCH)
2232 err = -ENETUNREACH;
1da177e4
LT
2233 goto local_input;
2234
2235 /*
2236 * Do not cache martian addresses: they should be logged (RFC1812)
2237 */
2238martian_destination:
2239 RT_CACHE_STAT_INC(in_martian_dst);
2240#ifdef CONFIG_IP_ROUTE_VERBOSE
2241 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
673d57e7
HH
2242 printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
2243 &daddr, &saddr, dev->name);
1da177e4 2244#endif
2c2910a4
DE
2245
2246e_hostunreach:
e905a9ed 2247 err = -EHOSTUNREACH;
ebc0ffae 2248 goto out;
2c2910a4 2249
1da177e4
LT
2250e_inval:
2251 err = -EINVAL;
ebc0ffae 2252 goto out;
1da177e4
LT
2253
2254e_nobufs:
2255 err = -ENOBUFS;
ebc0ffae 2256 goto out;
1da177e4
LT
2257
2258martian_source:
b5f7e755
ED
2259 err = -EINVAL;
2260martian_source_keep_err:
1da177e4 2261 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
ebc0ffae 2262 goto out;
1da177e4
LT
2263}
2264
407eadd9
ED
2265int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2266 u8 tos, struct net_device *dev, bool noref)
1da177e4
LT
2267{
2268 struct rtable * rth;
2269 unsigned hash;
2270 int iif = dev->ifindex;
b5921910 2271 struct net *net;
96d36220 2272 int res;
1da177e4 2273
c346dca1 2274 net = dev_net(dev);
1080d709 2275
96d36220
ED
2276 rcu_read_lock();
2277
1080d709
NH
2278 if (!rt_caching(net))
2279 goto skip_cache;
2280
1da177e4 2281 tos &= IPTOS_RT_MASK;
e84f84f2 2282 hash = rt_hash(daddr, saddr, iif, rt_genid(net));
1da177e4 2283
1da177e4 2284 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
d8d1f30b 2285 rth = rcu_dereference(rth->dst.rt_next)) {
5e2b61f7
DM
2286 if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
2287 ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
2288 (rth->rt_iif ^ iif) |
2289 rth->rt_oif |
2290 (rth->rt_tos ^ tos)) == 0 &&
2291 rth->rt_mark == skb->mark &&
d8d1f30b 2292 net_eq(dev_net(rth->dst.dev), net) &&
e84f84f2 2293 !rt_is_expired(rth)) {
407eadd9 2294 if (noref) {
d8d1f30b
CG
2295 dst_use_noref(&rth->dst, jiffies);
2296 skb_dst_set_noref(skb, &rth->dst);
407eadd9 2297 } else {
d8d1f30b
CG
2298 dst_use(&rth->dst, jiffies);
2299 skb_dst_set(skb, &rth->dst);
407eadd9 2300 }
1da177e4
LT
2301 RT_CACHE_STAT_INC(in_hit);
2302 rcu_read_unlock();
1da177e4
LT
2303 return 0;
2304 }
2305 RT_CACHE_STAT_INC(in_hlist_search);
2306 }
1da177e4 2307
1080d709 2308skip_cache:
1da177e4
LT
2309 /* Multicast recognition logic was moved from the route cache to here.
2310 The problem was that too many Ethernet cards have broken/missing
2311 hardware multicast filters :-( As a result, a host on a multicast
2312 network acquires a lot of useless route cache entries, e.g. from
2313 SDR messages from all over the world. Now we try to get rid of them.
2314 Really, provided the software IP multicast filter is organized
2315 reasonably (at least, hashed), it does not result in a slowdown
2316 compared with route cache reject entries.
2317 Note that multicast routers are not affected, because a
2318 route cache entry is created eventually.
2319 */
f97c1e0c 2320 if (ipv4_is_multicast(daddr)) {
96d36220 2321 struct in_device *in_dev = __in_dev_get_rcu(dev);
1da177e4 2322
96d36220 2323 if (in_dev) {
dbdd9a52
DM
2324 int our = ip_check_mc_rcu(in_dev, daddr, saddr,
2325 ip_hdr(skb)->protocol);
1da177e4
LT
2326 if (our
2327#ifdef CONFIG_IP_MROUTE
9d4fb27d
JP
2328 ||
2329 (!ipv4_is_local_multicast(daddr) &&
2330 IN_DEV_MFORWARD(in_dev))
1da177e4 2331#endif
9d4fb27d 2332 ) {
96d36220
ED
2333 int res = ip_route_input_mc(skb, daddr, saddr,
2334 tos, dev, our);
1da177e4 2335 rcu_read_unlock();
96d36220 2336 return res;
1da177e4
LT
2337 }
2338 }
2339 rcu_read_unlock();
2340 return -EINVAL;
2341 }
96d36220
ED
2342 res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
2343 rcu_read_unlock();
2344 return res;
1da177e4 2345}
407eadd9 2346EXPORT_SYMBOL(ip_route_input_common);
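/*
 * The "noref" flag selects between the two thin wrappers that callers
 * actually use, defined in include/net/route.h in this tree (roughly):
 *
 *	static inline int ip_route_input(struct sk_buff *skb, __be32 dst,
 *					 __be32 src, u8 tos, struct net_device *devin)
 *	{
 *		return ip_route_input_common(skb, dst, src, tos, devin, false);
 *	}
 *
 *	static inline int ip_route_input_noref(struct sk_buff *skb, __be32 dst,
 *					       __be32 src, u8 tos, struct net_device *devin)
 *	{
 *		return ip_route_input_common(skb, dst, src, tos, devin, true);
 *	}
 *
 * noref == true takes no reference on the cached dst (the caller must stay
 * inside its RCU read-side section), which is what the hot receive path uses.
 */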
1da177e4 2347
ebc0ffae 2348/* called with rcu_read_lock() */
982721f3 2349static struct rtable *__mkroute_output(const struct fib_result *res,
68a5e3dd
DM
2350 const struct flowi4 *fl4,
2351 const struct flowi4 *oldflp4,
5ada5527
DM
2352 struct net_device *dev_out,
2353 unsigned int flags)
1da177e4 2354{
982721f3 2355 struct fib_info *fi = res->fi;
68a5e3dd 2356 u32 tos = RT_FL_TOS(oldflp4);
5ada5527 2357 struct in_device *in_dev;
982721f3 2358 u16 type = res->type;
5ada5527 2359 struct rtable *rth;
1da177e4 2360
68a5e3dd 2361 if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
5ada5527 2362 return ERR_PTR(-EINVAL);
1da177e4 2363
68a5e3dd 2364 if (ipv4_is_lbcast(fl4->daddr))
982721f3 2365 type = RTN_BROADCAST;
68a5e3dd 2366 else if (ipv4_is_multicast(fl4->daddr))
982721f3 2367 type = RTN_MULTICAST;
68a5e3dd 2368 else if (ipv4_is_zeronet(fl4->daddr))
5ada5527 2369 return ERR_PTR(-EINVAL);
1da177e4
LT
2370
2371 if (dev_out->flags & IFF_LOOPBACK)
2372 flags |= RTCF_LOCAL;
2373
dd28d1a0 2374 in_dev = __in_dev_get_rcu(dev_out);
ebc0ffae 2375 if (!in_dev)
5ada5527 2376 return ERR_PTR(-EINVAL);
ebc0ffae 2377
982721f3 2378 if (type == RTN_BROADCAST) {
1da177e4 2379 flags |= RTCF_BROADCAST | RTCF_LOCAL;
982721f3
DM
2380 fi = NULL;
2381 } else if (type == RTN_MULTICAST) {
dd28d1a0 2382 flags |= RTCF_MULTICAST | RTCF_LOCAL;
68a5e3dd
DM
2383 if (!ip_check_mc_rcu(in_dev, oldflp4->daddr, oldflp4->saddr,
2384 oldflp4->flowi4_proto))
1da177e4
LT
2385 flags &= ~RTCF_LOCAL;
2386 /* If a multicast route does not exist, use the
dd28d1a0
ED
2387 * default one, but do not gateway in this case.
2388 * Yes, it is a hack.
1da177e4 2389 */
982721f3
DM
2390 if (fi && res->prefixlen < 4)
2391 fi = NULL;
1da177e4
LT
2392 }
2393
0c4dcd58
DM
2394 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
2395 IN_DEV_CONF_GET(in_dev, NOXFRM));
8391d07b 2396 if (!rth)
5ada5527 2397 return ERR_PTR(-ENOBUFS);
8391d07b 2398
68a5e3dd 2399 rth->rt_key_dst = oldflp4->daddr;
5e2b61f7 2400 rth->rt_tos = tos;
68a5e3dd
DM
2401 rth->rt_key_src = oldflp4->saddr;
2402 rth->rt_oif = oldflp4->flowi4_oif;
2403 rth->rt_mark = oldflp4->flowi4_mark;
2404 rth->rt_dst = fl4->daddr;
2405 rth->rt_src = fl4->saddr;
1018b5c0 2406 rth->rt_iif = 0;
e905a9ed 2407 /* get references to the devices that are to be held by the routing
1da177e4 2408 cache entry */
d8d1f30b 2409 rth->dst.dev = dev_out;
1da177e4 2410 dev_hold(dev_out);
68a5e3dd
DM
2411 rth->rt_gateway = fl4->daddr;
2412 rth->rt_spec_dst= fl4->saddr;
1da177e4 2413
d8d1f30b 2414 rth->dst.output=ip_output;
e84f84f2 2415 rth->rt_genid = rt_genid(dev_net(dev_out));
1da177e4
LT
2416
2417 RT_CACHE_STAT_INC(out_slow_tot);
2418
2419 if (flags & RTCF_LOCAL) {
d8d1f30b 2420 rth->dst.input = ip_local_deliver;
68a5e3dd 2421 rth->rt_spec_dst = fl4->daddr;
1da177e4
LT
2422 }
2423 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
68a5e3dd 2424 rth->rt_spec_dst = fl4->saddr;
e905a9ed 2425 if (flags & RTCF_LOCAL &&
1da177e4 2426 !(dev_out->flags & IFF_LOOPBACK)) {
d8d1f30b 2427 rth->dst.output = ip_mc_output;
1da177e4
LT
2428 RT_CACHE_STAT_INC(out_slow_mc);
2429 }
2430#ifdef CONFIG_IP_MROUTE
982721f3 2431 if (type == RTN_MULTICAST) {
1da177e4 2432 if (IN_DEV_MFORWARD(in_dev) &&
68a5e3dd 2433 !ipv4_is_local_multicast(oldflp4->daddr)) {
d8d1f30b
CG
2434 rth->dst.input = ip_mr_input;
2435 rth->dst.output = ip_mc_output;
1da177e4
LT
2436 }
2437 }
2438#endif
2439 }
2440
68a5e3dd 2441 rt_set_nexthop(rth, oldflp4, res, fi, type, 0);
1da177e4
LT
2442
2443 rth->rt_flags = flags;
5ada5527 2444 return rth;
1da177e4
LT
2445}
2446
1da177e4
LT
2447/*
2448 * Major route resolver routine.
0197aa38 2449 * called with rcu_read_lock();
1da177e4
LT
2450 */
2451
b23dd4fe 2452static struct rtable *ip_route_output_slow(struct net *net,
68a5e3dd 2453 const struct flowi4 *oldflp4)
1da177e4 2454{
68a5e3dd
DM
2455 u32 tos = RT_FL_TOS(oldflp4);
2456 struct flowi4 fl4;
1da177e4 2457 struct fib_result res;
0197aa38 2458 unsigned int flags = 0;
1da177e4 2459 struct net_device *dev_out = NULL;
5ada5527 2460 struct rtable *rth;
1da177e4
LT
2461
2462 res.fi = NULL;
2463#ifdef CONFIG_IP_MULTIPLE_TABLES
2464 res.r = NULL;
2465#endif
2466
68a5e3dd
DM
2467 fl4.flowi4_oif = oldflp4->flowi4_oif;
2468 fl4.flowi4_iif = net->loopback_dev->ifindex;
2469 fl4.flowi4_mark = oldflp4->flowi4_mark;
2470 fl4.daddr = oldflp4->daddr;
2471 fl4.saddr = oldflp4->saddr;
2472 fl4.flowi4_tos = tos & IPTOS_RT_MASK;
2473 fl4.flowi4_scope = ((tos & RTO_ONLINK) ?
44713b67
DM
2474 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2475
010c2708 2476 rcu_read_lock();
68a5e3dd 2477 if (oldflp4->saddr) {
b23dd4fe 2478 rth = ERR_PTR(-EINVAL);
68a5e3dd
DM
2479 if (ipv4_is_multicast(oldflp4->saddr) ||
2480 ipv4_is_lbcast(oldflp4->saddr) ||
2481 ipv4_is_zeronet(oldflp4->saddr))
1da177e4
LT
2482 goto out;
2483
1da177e4
LT
2484 /* I removed the check for oif == dev_out->oif here.
2485 It was wrong for two reasons:
1ab35276
DL
2486 1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2487 is assigned to multiple interfaces.
1da177e4
LT
2488 2. Moreover, we are allowed to send packets with saddr
2489 of another iface. --ANK
2490 */
2491
68a5e3dd
DM
2492 if (oldflp4->flowi4_oif == 0 &&
2493 (ipv4_is_multicast(oldflp4->daddr) ||
2494 ipv4_is_lbcast(oldflp4->daddr))) {
a210d01a 2495 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
68a5e3dd 2496 dev_out = __ip_dev_find(net, oldflp4->saddr, false);
a210d01a
JA
2497 if (dev_out == NULL)
2498 goto out;
2499
1da177e4
LT
2500 /* Special hack: user can direct multicasts
2501 and limited broadcast via necessary interface
2502 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2503 This hack is not just for fun, it allows
2504 vic,vat and friends to work.
2505 They bind socket to loopback, set ttl to zero
2506 and expect that it will work.
2507 From the viewpoint of routing cache they are broken,
2508 because we are not allowed to build multicast path
2509 with loopback source addr (look, routing cache
2510 cannot know, that ttl is zero, so that packet
2511 will not leave this host and route is valid).
2512 Luckily, this hack is good workaround.
2513 */
2514
68a5e3dd 2515 fl4.flowi4_oif = dev_out->ifindex;
1da177e4
LT
2516 goto make_route;
2517 }
a210d01a 2518
68a5e3dd 2519 if (!(oldflp4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
a210d01a 2520 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
68a5e3dd 2521 if (!__ip_dev_find(net, oldflp4->saddr, false))
a210d01a 2522 goto out;
a210d01a 2523 }
1da177e4
LT
2524 }
2525
2526
68a5e3dd
DM
2527 if (oldflp4->flowi4_oif) {
2528 dev_out = dev_get_by_index_rcu(net, oldflp4->flowi4_oif);
b23dd4fe 2529 rth = ERR_PTR(-ENODEV);
1da177e4
LT
2530 if (dev_out == NULL)
2531 goto out;
e5ed6399
HX
2532
2533 /* RACE: Check return value of inet_select_addr instead. */
fc75fc83 2534 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
b23dd4fe 2535 rth = ERR_PTR(-ENETUNREACH);
fc75fc83
ED
2536 goto out;
2537 }
68a5e3dd
DM
2538 if (ipv4_is_local_multicast(oldflp4->daddr) ||
2539 ipv4_is_lbcast(oldflp4->daddr)) {
2540 if (!fl4.saddr)
2541 fl4.saddr = inet_select_addr(dev_out, 0,
2542 RT_SCOPE_LINK);
1da177e4
LT
2543 goto make_route;
2544 }
68a5e3dd
DM
2545 if (!fl4.saddr) {
2546 if (ipv4_is_multicast(oldflp4->daddr))
2547 fl4.saddr = inet_select_addr(dev_out, 0,
2548 fl4.flowi4_scope);
2549 else if (!oldflp4->daddr)
2550 fl4.saddr = inet_select_addr(dev_out, 0,
2551 RT_SCOPE_HOST);
1da177e4
LT
2552 }
2553 }
2554
68a5e3dd
DM
2555 if (!fl4.daddr) {
2556 fl4.daddr = fl4.saddr;
2557 if (!fl4.daddr)
2558 fl4.daddr = fl4.saddr = htonl(INADDR_LOOPBACK);
b40afd0e 2559 dev_out = net->loopback_dev;
68a5e3dd 2560 fl4.flowi4_oif = net->loopback_dev->ifindex;
1da177e4
LT
2561 res.type = RTN_LOCAL;
2562 flags |= RTCF_LOCAL;
2563 goto make_route;
2564 }
2565
68a5e3dd 2566 if (fib_lookup(net, &fl4, &res)) {
1da177e4 2567 res.fi = NULL;
68a5e3dd 2568 if (oldflp4->flowi4_oif) {
1da177e4
LT
2569 /* Apparently, the routing tables are wrong. Assume
2570 that the destination is on-link.
2571
2572 WHY? DW.
2573 Because we are allowed to send to an iface
2574 even if it has NO routes and NO assigned
2575 addresses. When oif is specified, the routing
2576 tables are looked up with only one purpose:
2577 to check whether the destination is gatewayed, rather than
2578 direct. Moreover, if MSG_DONTROUTE is set,
2579 we send the packet, ignoring both routing tables
2580 and ifaddr state. --ANK
2581
2582
2583 We could make it even if oif is unknown,
2584 likely IPv6, but we do not.
2585 */
2586
68a5e3dd
DM
2587 if (fl4.saddr == 0)
2588 fl4.saddr = inet_select_addr(dev_out, 0,
2589 RT_SCOPE_LINK);
1da177e4
LT
2590 res.type = RTN_UNICAST;
2591 goto make_route;
2592 }
b23dd4fe 2593 rth = ERR_PTR(-ENETUNREACH);
1da177e4
LT
2594 goto out;
2595 }
1da177e4
LT
2596
2597 if (res.type == RTN_LOCAL) {
68a5e3dd 2598 if (!fl4.saddr) {
9fc3bbb4 2599 if (res.fi->fib_prefsrc)
68a5e3dd 2600 fl4.saddr = res.fi->fib_prefsrc;
9fc3bbb4 2601 else
68a5e3dd 2602 fl4.saddr = fl4.daddr;
9fc3bbb4 2603 }
b40afd0e 2604 dev_out = net->loopback_dev;
68a5e3dd 2605 fl4.flowi4_oif = dev_out->ifindex;
1da177e4
LT
2606 res.fi = NULL;
2607 flags |= RTCF_LOCAL;
2608 goto make_route;
2609 }
2610
2611#ifdef CONFIG_IP_ROUTE_MULTIPATH
68a5e3dd 2612 if (res.fi->fib_nhs > 1 && fl4.flowi4_oif == 0)
1b7fe593 2613 fib_select_multipath(&res);
1da177e4
LT
2614 else
2615#endif
68a5e3dd 2616 if (!res.prefixlen && res.type == RTN_UNICAST && !fl4.flowi4_oif)
0c838ff1 2617 fib_select_default(&res);
1da177e4 2618
68a5e3dd
DM
2619 if (!fl4.saddr)
2620 fl4.saddr = FIB_RES_PREFSRC(res);
1da177e4 2621
1da177e4 2622 dev_out = FIB_RES_DEV(res);
68a5e3dd 2623 fl4.flowi4_oif = dev_out->ifindex;
1da177e4
LT
2624
2625
2626make_route:
68a5e3dd 2627 rth = __mkroute_output(&res, &fl4, oldflp4, dev_out, flags);
b23dd4fe 2628 if (!IS_ERR(rth)) {
5ada5527
DM
2629 unsigned int hash;
2630
68a5e3dd 2631 hash = rt_hash(oldflp4->daddr, oldflp4->saddr, oldflp4->flowi4_oif,
5ada5527 2632 rt_genid(dev_net(dev_out)));
68a5e3dd 2633 rth = rt_intern_hash(hash, rth, NULL, oldflp4->flowi4_oif);
5ada5527 2634 }
1da177e4 2635
010c2708
DM
2636out:
2637 rcu_read_unlock();
b23dd4fe 2638 return rth;
1da177e4
LT
2639}
2640
9d6ec938 2641struct rtable *__ip_route_output_key(struct net *net, const struct flowi4 *flp4)
1da177e4 2642{
1da177e4 2643 struct rtable *rth;
010c2708 2644 unsigned int hash;
1da177e4 2645
1080d709
NH
2646 if (!rt_caching(net))
2647 goto slow_output;
2648
9d6ec938 2649 hash = rt_hash(flp4->daddr, flp4->saddr, flp4->flowi4_oif, rt_genid(net));
1da177e4
LT
2650
2651 rcu_read_lock_bh();
a898def2 2652 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
d8d1f30b 2653 rth = rcu_dereference_bh(rth->dst.rt_next)) {
9d6ec938
DM
2654 if (rth->rt_key_dst == flp4->daddr &&
2655 rth->rt_key_src == flp4->saddr &&
c7537967 2656 rt_is_output_route(rth) &&
9d6ec938
DM
2657 rth->rt_oif == flp4->flowi4_oif &&
2658 rth->rt_mark == flp4->flowi4_mark &&
2659 !((rth->rt_tos ^ flp4->flowi4_tos) &
b5921910 2660 (IPTOS_RT_MASK | RTO_ONLINK)) &&
d8d1f30b 2661 net_eq(dev_net(rth->dst.dev), net) &&
e84f84f2 2662 !rt_is_expired(rth)) {
d8d1f30b 2663 dst_use(&rth->dst, jiffies);
1da177e4
LT
2664 RT_CACHE_STAT_INC(out_hit);
2665 rcu_read_unlock_bh();
b23dd4fe 2666 return rth;
1da177e4
LT
2667 }
2668 RT_CACHE_STAT_INC(out_hlist_search);
2669 }
2670 rcu_read_unlock_bh();
2671
1080d709 2672slow_output:
9d6ec938 2673 return ip_route_output_slow(net, flp4);
1da177e4 2674}
d8c97a94
ACM
2675EXPORT_SYMBOL_GPL(__ip_route_output_key);
2676
ae2688d5
JW
2677static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2678{
2679 return NULL;
2680}
2681
ec831ea7
RD
2682static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
2683{
2684 return 0;
2685}
2686
14e50e57
DM
2687static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2688{
2689}
2690
2691static struct dst_ops ipv4_dst_blackhole_ops = {
2692 .family = AF_INET,
09640e63 2693 .protocol = cpu_to_be16(ETH_P_IP),
14e50e57 2694 .destroy = ipv4_dst_destroy,
ae2688d5 2695 .check = ipv4_blackhole_dst_check,
ec831ea7 2696 .default_mtu = ipv4_blackhole_default_mtu,
214f45c9 2697 .default_advmss = ipv4_default_advmss,
14e50e57 2698 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
14e50e57
DM
2699};
2700
2774c131 2701struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
14e50e57 2702{
2774c131
DM
2703 struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, 1);
2704 struct rtable *ort = (struct rtable *) dst_orig;
14e50e57
DM
2705
2706 if (rt) {
d8d1f30b 2707 struct dst_entry *new = &rt->dst;
14e50e57 2708
14e50e57 2709 new->__use = 1;
352e512c
HX
2710 new->input = dst_discard;
2711 new->output = dst_discard;
defb3519 2712 dst_copy_metrics(new, &ort->dst);
14e50e57 2713
d8d1f30b 2714 new->dev = ort->dst.dev;
14e50e57
DM
2715 if (new->dev)
2716 dev_hold(new->dev);
2717
5e2b61f7
DM
2718 rt->rt_key_dst = ort->rt_key_dst;
2719 rt->rt_key_src = ort->rt_key_src;
2720 rt->rt_tos = ort->rt_tos;
2721 rt->rt_iif = ort->rt_iif;
2722 rt->rt_oif = ort->rt_oif;
2723 rt->rt_mark = ort->rt_mark;
14e50e57 2724
e84f84f2 2725 rt->rt_genid = rt_genid(net);
14e50e57
DM
2726 rt->rt_flags = ort->rt_flags;
2727 rt->rt_type = ort->rt_type;
2728 rt->rt_dst = ort->rt_dst;
2729 rt->rt_src = ort->rt_src;
2730 rt->rt_iif = ort->rt_iif;
2731 rt->rt_gateway = ort->rt_gateway;
2732 rt->rt_spec_dst = ort->rt_spec_dst;
2733 rt->peer = ort->peer;
2734 if (rt->peer)
2735 atomic_inc(&rt->peer->refcnt);
62fa8a84
DM
2736 rt->fi = ort->fi;
2737 if (rt->fi)
2738 atomic_inc(&rt->fi->fib_clntref);
14e50e57
DM
2739
2740 dst_free(new);
2741 }
2742
2774c131
DM
2743 dst_release(dst_orig);
2744
2745 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
14e50e57
DM
2746}
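/*
 * ipv4_blackhole_route() is used by the xfrm layer (see xfrm_lookup() in this
 * tree): when a matching IPsec policy exists but the SAs are still being
 * negotiated, the original route is replaced by this copy whose input and
 * output handlers are dst_discard, so packets are silently dropped instead of
 * being sent in the clear while negotiation completes.
 */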
2747
9d6ec938 2748struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
b23dd4fe 2749 struct sock *sk)
1da177e4 2750{
9d6ec938 2751 struct rtable *rt = __ip_route_output_key(net, flp4);
1da177e4 2752
b23dd4fe
DM
2753 if (IS_ERR(rt))
2754 return rt;
1da177e4 2755
9d6ec938
DM
2756 if (flp4->flowi4_proto) {
2757 if (!flp4->saddr)
2758 flp4->saddr = rt->rt_src;
2759 if (!flp4->daddr)
2760 flp4->daddr = rt->rt_dst;
2761 rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2762 flowi4_to_flowi(flp4),
2763 sk, 0);
1da177e4
LT
2764 }
2765
b23dd4fe 2766 return rt;
1da177e4 2767}
d8c97a94
ACM
2768EXPORT_SYMBOL_GPL(ip_route_output_flow);
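/*
 * Typical caller pattern, a minimal sketch (field values are illustrative,
 * taken from the usual socket-based output path):
 *
 *	struct flowi4 fl4 = {
 *		.daddr        = daddr,
 *		.saddr        = inet->inet_saddr,
 *		.flowi4_oif   = sk->sk_bound_dev_if,
 *		.flowi4_tos   = RT_CONN_FLAGS(sk),
 *		.flowi4_proto = sk->sk_protocol,
 *	};
 *	struct rtable *rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
 *
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	...
 *	ip_rt_put(rt);
 *
 * As of the b23dd4fe conversion in this tree, the function returns the rtable
 * directly (or an ERR_PTR), so callers check IS_ERR() rather than an err/ptr
 * pair.
 */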
2769
4feb88e5
BT
2770static int rt_fill_info(struct net *net,
2771 struct sk_buff *skb, u32 pid, u32 seq, int event,
b6544c0b 2772 int nowait, unsigned int flags)
1da177e4 2773{
511c3f92 2774 struct rtable *rt = skb_rtable(skb);
1da177e4 2775 struct rtmsg *r;
be403ea1 2776 struct nlmsghdr *nlh;
e3703b3d
TG
2777 long expires;
2778 u32 id = 0, ts = 0, tsage = 0, error;
be403ea1
TG
2779
2780 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2781 if (nlh == NULL)
26932566 2782 return -EMSGSIZE;
be403ea1
TG
2783
2784 r = nlmsg_data(nlh);
1da177e4
LT
2785 r->rtm_family = AF_INET;
2786 r->rtm_dst_len = 32;
2787 r->rtm_src_len = 0;
5e2b61f7 2788 r->rtm_tos = rt->rt_tos;
1da177e4 2789 r->rtm_table = RT_TABLE_MAIN;
be403ea1 2790 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
1da177e4
LT
2791 r->rtm_type = rt->rt_type;
2792 r->rtm_scope = RT_SCOPE_UNIVERSE;
2793 r->rtm_protocol = RTPROT_UNSPEC;
2794 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2795 if (rt->rt_flags & RTCF_NOTIFY)
2796 r->rtm_flags |= RTM_F_NOTIFY;
be403ea1 2797
17fb2c64 2798 NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
be403ea1 2799
5e2b61f7 2800 if (rt->rt_key_src) {
1da177e4 2801 r->rtm_src_len = 32;
5e2b61f7 2802 NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
1da177e4 2803 }
d8d1f30b
CG
2804 if (rt->dst.dev)
2805 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
c7066f70 2806#ifdef CONFIG_IP_ROUTE_CLASSID
d8d1f30b
CG
2807 if (rt->dst.tclassid)
2808 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
1da177e4 2809#endif
c7537967 2810 if (rt_is_input_route(rt))
17fb2c64 2811 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
5e2b61f7 2812 else if (rt->rt_src != rt->rt_key_src)
17fb2c64 2813 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
be403ea1 2814
1da177e4 2815 if (rt->rt_dst != rt->rt_gateway)
17fb2c64 2816 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
be403ea1 2817
defb3519 2818 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
be403ea1
TG
2819 goto nla_put_failure;
2820
5e2b61f7
DM
2821 if (rt->rt_mark)
2822 NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
963bfeee 2823
d8d1f30b 2824 error = rt->dst.error;
2c8cec5c
DM
2825 expires = (rt->peer && rt->peer->pmtu_expires) ?
2826 rt->peer->pmtu_expires - jiffies : 0;
1da177e4 2827 if (rt->peer) {
317fe0e6 2828 inet_peer_refcheck(rt->peer);
2c1409a0 2829 id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
1da177e4 2830 if (rt->peer->tcp_ts_stamp) {
e3703b3d 2831 ts = rt->peer->tcp_ts;
9d729f72 2832 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
1da177e4
LT
2833 }
2834 }
be403ea1 2835
c7537967 2836 if (rt_is_input_route(rt)) {
1da177e4 2837#ifdef CONFIG_IP_MROUTE
e448515c 2838 __be32 dst = rt->rt_dst;
1da177e4 2839
f97c1e0c 2840 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
4feb88e5
BT
2841 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2842 int err = ipmr_get_route(net, skb, r, nowait);
1da177e4
LT
2843 if (err <= 0) {
2844 if (!nowait) {
2845 if (err == 0)
2846 return 0;
be403ea1 2847 goto nla_put_failure;
1da177e4
LT
2848 } else {
2849 if (err == -EMSGSIZE)
be403ea1 2850 goto nla_put_failure;
e3703b3d 2851 error = err;
1da177e4
LT
2852 }
2853 }
2854 } else
2855#endif
5e2b61f7 2856 NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
1da177e4
LT
2857 }
2858
d8d1f30b 2859 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
e3703b3d
TG
2860 expires, error) < 0)
2861 goto nla_put_failure;
be403ea1
TG
2862
2863 return nlmsg_end(skb, nlh);
1da177e4 2864
be403ea1 2865nla_put_failure:
26932566
PM
2866 nlmsg_cancel(skb, nlh);
2867 return -EMSGSIZE;
1da177e4
LT
2868}
2869
63f3444f 2870static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
1da177e4 2871{
3b1e0a65 2872 struct net *net = sock_net(in_skb->sk);
d889ce3b
TG
2873 struct rtmsg *rtm;
2874 struct nlattr *tb[RTA_MAX+1];
1da177e4 2875 struct rtable *rt = NULL;
9e12bb22
AV
2876 __be32 dst = 0;
2877 __be32 src = 0;
2878 u32 iif;
d889ce3b 2879 int err;
963bfeee 2880 int mark;
1da177e4
LT
2881 struct sk_buff *skb;
2882
d889ce3b
TG
2883 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2884 if (err < 0)
2885 goto errout;
2886
2887 rtm = nlmsg_data(nlh);
2888
1da177e4 2889 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
d889ce3b
TG
2890 if (skb == NULL) {
2891 err = -ENOBUFS;
2892 goto errout;
2893 }
1da177e4
LT
2894
2895 /* Reserve room for dummy headers; this skb can pass
2896 through a good chunk of the routing engine.
2897 */
459a98ed 2898 skb_reset_mac_header(skb);
c1d2bbe1 2899 skb_reset_network_header(skb);
d2c962b8
SH
2900
2901 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
eddc9ec5 2902 ip_hdr(skb)->protocol = IPPROTO_ICMP;
1da177e4
LT
2903 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2904
17fb2c64
AV
2905 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2906 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
d889ce3b 2907 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
963bfeee 2908 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
1da177e4
LT
2909
2910 if (iif) {
d889ce3b
TG
2911 struct net_device *dev;
2912
1937504d 2913 dev = __dev_get_by_index(net, iif);
d889ce3b
TG
2914 if (dev == NULL) {
2915 err = -ENODEV;
2916 goto errout_free;
2917 }
2918
1da177e4
LT
2919 skb->protocol = htons(ETH_P_IP);
2920 skb->dev = dev;
963bfeee 2921 skb->mark = mark;
1da177e4
LT
2922 local_bh_disable();
2923 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2924 local_bh_enable();
d889ce3b 2925
511c3f92 2926 rt = skb_rtable(skb);
d8d1f30b
CG
2927 if (err == 0 && rt->dst.error)
2928 err = -rt->dst.error;
1da177e4 2929 } else {
68a5e3dd
DM
2930 struct flowi4 fl4 = {
2931 .daddr = dst,
2932 .saddr = src,
2933 .flowi4_tos = rtm->rtm_tos,
2934 .flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2935 .flowi4_mark = mark,
d889ce3b 2936 };
9d6ec938 2937 rt = ip_route_output_key(net, &fl4);
b23dd4fe
DM
2938
2939 err = 0;
2940 if (IS_ERR(rt))
2941 err = PTR_ERR(rt);
1da177e4 2942 }
d889ce3b 2943
1da177e4 2944 if (err)
d889ce3b 2945 goto errout_free;
1da177e4 2946
d8d1f30b 2947 skb_dst_set(skb, &rt->dst);
1da177e4
LT
2948 if (rtm->rtm_flags & RTM_F_NOTIFY)
2949 rt->rt_flags |= RTCF_NOTIFY;
2950
4feb88e5 2951 err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
1937504d 2952 RTM_NEWROUTE, 0, 0);
d889ce3b
TG
2953 if (err <= 0)
2954 goto errout_free;
1da177e4 2955
1937504d 2956 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
d889ce3b 2957errout:
2942e900 2958 return err;
1da177e4 2959
d889ce3b 2960errout_free:
1da177e4 2961 kfree_skb(skb);
d889ce3b 2962 goto errout;
1da177e4
LT
2963}
2964
2965int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2966{
2967 struct rtable *rt;
2968 int h, s_h;
2969 int idx, s_idx;
1937504d
DL
2970 struct net *net;
2971
3b1e0a65 2972 net = sock_net(skb->sk);
1da177e4
LT
2973
2974 s_h = cb->args[0];
d8c92830
ED
2975 if (s_h < 0)
2976 s_h = 0;
1da177e4 2977 s_idx = idx = cb->args[1];
a6272665
ED
2978 for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
2979 if (!rt_hash_table[h].chain)
2980 continue;
1da177e4 2981 rcu_read_lock_bh();
a898def2 2982 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
d8d1f30b
CG
2983 rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
2984 if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
1da177e4 2985 continue;
e84f84f2 2986 if (rt_is_expired(rt))
29e75252 2987 continue;
d8d1f30b 2988 skb_dst_set_noref(skb, &rt->dst);
4feb88e5 2989 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
e905a9ed 2990 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
b6544c0b 2991 1, NLM_F_MULTI) <= 0) {
adf30907 2992 skb_dst_drop(skb);
1da177e4
LT
2993 rcu_read_unlock_bh();
2994 goto done;
2995 }
adf30907 2996 skb_dst_drop(skb);
1da177e4
LT
2997 }
2998 rcu_read_unlock_bh();
2999 }
3000
3001done:
3002 cb->args[0] = h;
3003 cb->args[1] = idx;
3004 return skb->len;
3005}
3006
3007void ip_rt_multicast_event(struct in_device *in_dev)
3008{
76e6ebfb 3009 rt_cache_flush(dev_net(in_dev->dev), 0);
1da177e4
LT
3010}
3011
3012#ifdef CONFIG_SYSCTL
81c684d1 3013static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
8d65af78 3014 void __user *buffer,
1da177e4
LT
3015 size_t *lenp, loff_t *ppos)
3016{
3017 if (write) {
639e104f 3018 int flush_delay;
81c684d1 3019 ctl_table ctl;
39a23e75 3020 struct net *net;
639e104f 3021
81c684d1
DL
3022 memcpy(&ctl, __ctl, sizeof(ctl));
3023 ctl.data = &flush_delay;
8d65af78 3024 proc_dointvec(&ctl, write, buffer, lenp, ppos);
639e104f 3025
81c684d1 3026 net = (struct net *)__ctl->extra1;
39a23e75 3027 rt_cache_flush(net, flush_delay);
1da177e4 3028 return 0;
e905a9ed 3029 }
1da177e4
LT
3030
3031 return -EINVAL;
3032}
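/*
 * This handler backs the write-only file /proc/sys/net/ipv4/route/flush
 * (see ipv4_route_flush_table below).  Writing an integer flushes the route
 * cache for that network namespace, e.g.:
 *
 *	echo -1 > /proc/sys/net/ipv4/route/flush
 *
 * The written value is handed to rt_cache_flush() as the flush delay; 0 or a
 * negative value is commonly used to request an immediate flush.
 */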
3033
eeb61f71 3034static ctl_table ipv4_route_table[] = {
1da177e4 3035 {
1da177e4
LT
3036 .procname = "gc_thresh",
3037 .data = &ipv4_dst_ops.gc_thresh,
3038 .maxlen = sizeof(int),
3039 .mode = 0644,
6d9f239a 3040 .proc_handler = proc_dointvec,
1da177e4
LT
3041 },
3042 {
1da177e4
LT
3043 .procname = "max_size",
3044 .data = &ip_rt_max_size,
3045 .maxlen = sizeof(int),
3046 .mode = 0644,
6d9f239a 3047 .proc_handler = proc_dointvec,
1da177e4
LT
3048 },
3049 {
3050 /* Deprecated. Use gc_min_interval_ms */
e905a9ed 3051
1da177e4
LT
3052 .procname = "gc_min_interval",
3053 .data = &ip_rt_gc_min_interval,
3054 .maxlen = sizeof(int),
3055 .mode = 0644,
6d9f239a 3056 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3057 },
3058 {
1da177e4
LT
3059 .procname = "gc_min_interval_ms",
3060 .data = &ip_rt_gc_min_interval,
3061 .maxlen = sizeof(int),
3062 .mode = 0644,
6d9f239a 3063 .proc_handler = proc_dointvec_ms_jiffies,
1da177e4
LT
3064 },
3065 {
1da177e4
LT
3066 .procname = "gc_timeout",
3067 .data = &ip_rt_gc_timeout,
3068 .maxlen = sizeof(int),
3069 .mode = 0644,
6d9f239a 3070 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3071 },
3072 {
1da177e4
LT
3073 .procname = "gc_interval",
3074 .data = &ip_rt_gc_interval,
3075 .maxlen = sizeof(int),
3076 .mode = 0644,
6d9f239a 3077 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3078 },
3079 {
1da177e4
LT
3080 .procname = "redirect_load",
3081 .data = &ip_rt_redirect_load,
3082 .maxlen = sizeof(int),
3083 .mode = 0644,
6d9f239a 3084 .proc_handler = proc_dointvec,
1da177e4
LT
3085 },
3086 {
1da177e4
LT
3087 .procname = "redirect_number",
3088 .data = &ip_rt_redirect_number,
3089 .maxlen = sizeof(int),
3090 .mode = 0644,
6d9f239a 3091 .proc_handler = proc_dointvec,
1da177e4
LT
3092 },
3093 {
1da177e4
LT
3094 .procname = "redirect_silence",
3095 .data = &ip_rt_redirect_silence,
3096 .maxlen = sizeof(int),
3097 .mode = 0644,
6d9f239a 3098 .proc_handler = proc_dointvec,
1da177e4
LT
3099 },
3100 {
1da177e4
LT
3101 .procname = "error_cost",
3102 .data = &ip_rt_error_cost,
3103 .maxlen = sizeof(int),
3104 .mode = 0644,
6d9f239a 3105 .proc_handler = proc_dointvec,
1da177e4
LT
3106 },
3107 {
1da177e4
LT
3108 .procname = "error_burst",
3109 .data = &ip_rt_error_burst,
3110 .maxlen = sizeof(int),
3111 .mode = 0644,
6d9f239a 3112 .proc_handler = proc_dointvec,
1da177e4
LT
3113 },
3114 {
1da177e4
LT
3115 .procname = "gc_elasticity",
3116 .data = &ip_rt_gc_elasticity,
3117 .maxlen = sizeof(int),
3118 .mode = 0644,
6d9f239a 3119 .proc_handler = proc_dointvec,
1da177e4
LT
3120 },
3121 {
1da177e4
LT
3122 .procname = "mtu_expires",
3123 .data = &ip_rt_mtu_expires,
3124 .maxlen = sizeof(int),
3125 .mode = 0644,
6d9f239a 3126 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3127 },
3128 {
1da177e4
LT
3129 .procname = "min_pmtu",
3130 .data = &ip_rt_min_pmtu,
3131 .maxlen = sizeof(int),
3132 .mode = 0644,
6d9f239a 3133 .proc_handler = proc_dointvec,
1da177e4
LT
3134 },
3135 {
1da177e4
LT
3136 .procname = "min_adv_mss",
3137 .data = &ip_rt_min_advmss,
3138 .maxlen = sizeof(int),
3139 .mode = 0644,
6d9f239a 3140 .proc_handler = proc_dointvec,
1da177e4 3141 },
f8572d8f 3142 { }
1da177e4 3143};
39a23e75 3144
2f4520d3
AV
3145static struct ctl_table empty[1];
3146
3147static struct ctl_table ipv4_skeleton[] =
3148{
f8572d8f 3149 { .procname = "route",
d994af0d 3150 .mode = 0555, .child = ipv4_route_table},
f8572d8f 3151 { .procname = "neigh",
d994af0d 3152 .mode = 0555, .child = empty},
2f4520d3
AV
3153 { }
3154};
3155
3156static __net_initdata struct ctl_path ipv4_path[] = {
f8572d8f
EB
3157 { .procname = "net", },
3158 { .procname = "ipv4", },
39a23e75
DL
3159 { },
3160};
3161
39a23e75
DL
3162static struct ctl_table ipv4_route_flush_table[] = {
3163 {
39a23e75
DL
3164 .procname = "flush",
3165 .maxlen = sizeof(int),
3166 .mode = 0200,
6d9f239a 3167 .proc_handler = ipv4_sysctl_rtcache_flush,
39a23e75 3168 },
f8572d8f 3169 { },
39a23e75
DL
3170};
3171
2f4520d3 3172static __net_initdata struct ctl_path ipv4_route_path[] = {
f8572d8f
EB
3173 { .procname = "net", },
3174 { .procname = "ipv4", },
3175 { .procname = "route", },
2f4520d3
AV
3176 { },
3177};
3178
39a23e75
DL
3179static __net_init int sysctl_route_net_init(struct net *net)
3180{
3181 struct ctl_table *tbl;
3182
3183 tbl = ipv4_route_flush_table;
09ad9bc7 3184 if (!net_eq(net, &init_net)) {
39a23e75
DL
3185 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3186 if (tbl == NULL)
3187 goto err_dup;
3188 }
3189 tbl[0].extra1 = net;
3190
3191 net->ipv4.route_hdr =
3192 register_net_sysctl_table(net, ipv4_route_path, tbl);
3193 if (net->ipv4.route_hdr == NULL)
3194 goto err_reg;
3195 return 0;
3196
3197err_reg:
3198 if (tbl != ipv4_route_flush_table)
3199 kfree(tbl);
3200err_dup:
3201 return -ENOMEM;
3202}
3203
3204static __net_exit void sysctl_route_net_exit(struct net *net)
3205{
3206 struct ctl_table *tbl;
3207
3208 tbl = net->ipv4.route_hdr->ctl_table_arg;
3209 unregister_net_sysctl_table(net->ipv4.route_hdr);
3210 BUG_ON(tbl == ipv4_route_flush_table);
3211 kfree(tbl);
3212}
3213
3214static __net_initdata struct pernet_operations sysctl_route_ops = {
3215 .init = sysctl_route_net_init,
3216 .exit = sysctl_route_net_exit,
3217};
1da177e4
LT
3218#endif
3219
3ee94372 3220static __net_init int rt_genid_init(struct net *net)
9f5e97e5 3221{
3ee94372
NH
3222 get_random_bytes(&net->ipv4.rt_genid,
3223 sizeof(net->ipv4.rt_genid));
9f5e97e5
DL
3224 return 0;
3225}
3226
3ee94372
NH
3227static __net_initdata struct pernet_operations rt_genid_ops = {
3228 .init = rt_genid_init,
9f5e97e5
DL
3229};
3230
3231
c7066f70 3232#ifdef CONFIG_IP_ROUTE_CLASSID
7d720c3e 3233struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
c7066f70 3234#endif /* CONFIG_IP_ROUTE_CLASSID */
1da177e4
LT
3235
3236static __initdata unsigned long rhash_entries;
3237static int __init set_rhash_entries(char *str)
3238{
3239 if (!str)
3240 return 0;
3241 rhash_entries = simple_strtoul(str, &str, 0);
3242 return 1;
3243}
3244__setup("rhash_entries=", set_rhash_entries);
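/*
 * "rhash_entries=N" on the kernel command line overrides the number of route
 * cache hash buckets; when it is left at 0, alloc_large_system_hash() in
 * ip_rt_init() below sizes the table from available memory, capped at
 * 512K entries.
 */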
3245
3246int __init ip_rt_init(void)
3247{
424c4b70 3248 int rc = 0;
1da177e4 3249
c7066f70 3250#ifdef CONFIG_IP_ROUTE_CLASSID
0dcec8c2 3251 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
1da177e4
LT
3252 if (!ip_rt_acct)
3253 panic("IP: failed to allocate ip_rt_acct\n");
1da177e4
LT
3254#endif
3255
e5d679f3
AD
3256 ipv4_dst_ops.kmem_cachep =
3257 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
20c2df83 3258 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1da177e4 3259
14e50e57
DM
3260 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3261
fc66f95c
ED
3262 if (dst_entries_init(&ipv4_dst_ops) < 0)
3263 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3264
3265 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3266 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3267
424c4b70
ED
3268 rt_hash_table = (struct rt_hash_bucket *)
3269 alloc_large_system_hash("IP route cache",
3270 sizeof(struct rt_hash_bucket),
3271 rhash_entries,
4481374c 3272 (totalram_pages >= 128 * 1024) ?
18955cfc 3273 15 : 17,
8d1502de 3274 0,
424c4b70
ED
3275 &rt_hash_log,
3276 &rt_hash_mask,
c9503e0f 3277 rhash_entries ? 0 : 512 * 1024);
22c047cc
ED
3278 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3279 rt_hash_lock_init();
1da177e4
LT
3280
3281 ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3282 ip_rt_max_size = (rt_hash_mask + 1) * 16;
3283
1da177e4
LT
3284 devinet_init();
3285 ip_fib_init();
3286
73b38711 3287 if (ip_rt_proc_init())
107f1634 3288 printk(KERN_ERR "Unable to create route proc files\n");
1da177e4
LT
3289#ifdef CONFIG_XFRM
3290 xfrm_init();
a33bc5c1 3291 xfrm4_init(ip_rt_max_size);
1da177e4 3292#endif
63f3444f
TG
3293 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
3294
39a23e75
DL
3295#ifdef CONFIG_SYSCTL
3296 register_pernet_subsys(&sysctl_route_ops);
3297#endif
3ee94372 3298 register_pernet_subsys(&rt_genid_ops);
1da177e4
LT
3299 return rc;
3300}
3301
a1bc6eb4 3302#ifdef CONFIG_SYSCTL
eeb61f71
AV
3303/*
3304 * We really need to sanitize the damn ipv4 init order, then all
3305 * this nonsense will go away.
3306 */
3307void __init ip_static_sysctl_init(void)
3308{
2f4520d3 3309 register_sysctl_paths(ipv4_path, ipv4_skeleton);
eeb61f71 3310}
a1bc6eb4 3311#endif