[IPV6] SIT: Add PRL management for ISATAP.
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net/ipv6/route.c
1 /*
2 * Linux INET6 implementation
3 * FIB front-end.
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * $Id: route.c,v 1.56 2001/10/31 21:55:55 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16 /* Changes:
17 *
18 * YOSHIFUJI Hideaki @USAGI
19 * reworked default router selection.
20 * - respect outgoing interface
21 * - select from (probably) reachable routers (i.e.
22 * routers in REACHABLE, STALE, DELAY or PROBE states).
23 * - always select the same router if it is (probably)
24 * reachable. otherwise, round-robin the list.
25 * Ville Nuorvala
26 * Fixed routing subtrees.
27 */
28
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/types.h>
32 #include <linux/times.h>
33 #include <linux/socket.h>
34 #include <linux/sockios.h>
35 #include <linux/net.h>
36 #include <linux/route.h>
37 #include <linux/netdevice.h>
38 #include <linux/in6.h>
39 #include <linux/init.h>
40 #include <linux/if_arp.h>
41 #include <linux/proc_fs.h>
42 #include <linux/seq_file.h>
43 #include <linux/nsproxy.h>
44 #include <net/net_namespace.h>
45 #include <net/snmp.h>
46 #include <net/ipv6.h>
47 #include <net/ip6_fib.h>
48 #include <net/ip6_route.h>
49 #include <net/ndisc.h>
50 #include <net/addrconf.h>
51 #include <net/tcp.h>
52 #include <linux/rtnetlink.h>
53 #include <net/dst.h>
54 #include <net/xfrm.h>
55 #include <net/netevent.h>
56 #include <net/netlink.h>
57
58 #include <asm/uaccess.h>
59
60 #ifdef CONFIG_SYSCTL
61 #include <linux/sysctl.h>
62 #endif
63
64 /* Set to 3 to get tracing. */
65 #define RT6_DEBUG 2
66
67 #if RT6_DEBUG >= 3
68 #define RDBG(x) printk x
69 #define RT6_TRACE(x...) printk(KERN_DEBUG x)
70 #else
71 #define RDBG(x)
72 #define RT6_TRACE(x...) do { ; } while (0)
73 #endif
74
75 #define CLONE_OFFLINK_ROUTE 0
76
77 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
78 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
79 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
80 static void ip6_dst_destroy(struct dst_entry *);
81 static void ip6_dst_ifdown(struct dst_entry *,
82 struct net_device *dev, int how);
83 static int ip6_dst_gc(struct dst_ops *ops);
84
85 static int ip6_pkt_discard(struct sk_buff *skb);
86 static int ip6_pkt_discard_out(struct sk_buff *skb);
87 static void ip6_link_failure(struct sk_buff *skb);
88 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
89
90 #ifdef CONFIG_IPV6_ROUTE_INFO
91 static struct rt6_info *rt6_add_route_info(struct net *net,
92 struct in6_addr *prefix, int prefixlen,
93 struct in6_addr *gwaddr, int ifindex,
94 unsigned pref);
95 static struct rt6_info *rt6_get_route_info(struct net *net,
96 struct in6_addr *prefix, int prefixlen,
97 struct in6_addr *gwaddr, int ifindex);
98 #endif
99
100 static struct dst_ops ip6_dst_ops_template = {
101 .family = AF_INET6,
102 .protocol = __constant_htons(ETH_P_IPV6),
103 .gc = ip6_dst_gc,
104 .gc_thresh = 1024,
105 .check = ip6_dst_check,
106 .destroy = ip6_dst_destroy,
107 .ifdown = ip6_dst_ifdown,
108 .negative_advice = ip6_negative_advice,
109 .link_failure = ip6_link_failure,
110 .update_pmtu = ip6_rt_update_pmtu,
111 .local_out = ip6_local_out,
112 .entry_size = sizeof(struct rt6_info),
113 .entries = ATOMIC_INIT(0),
114 };
115
116 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
117 {
118 }
119
120 static struct dst_ops ip6_dst_blackhole_ops = {
121 .family = AF_INET6,
122 .protocol = __constant_htons(ETH_P_IPV6),
123 .destroy = ip6_dst_destroy,
124 .check = ip6_dst_check,
125 .update_pmtu = ip6_rt_blackhole_update_pmtu,
126 .entry_size = sizeof(struct rt6_info),
127 .entries = ATOMIC_INIT(0),
128 };
129
130 static struct rt6_info ip6_null_entry_template = {
131 .u = {
132 .dst = {
133 .__refcnt = ATOMIC_INIT(1),
134 .__use = 1,
135 .obsolete = -1,
136 .error = -ENETUNREACH,
137 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
138 .input = ip6_pkt_discard,
139 .output = ip6_pkt_discard_out,
140 }
141 },
142 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
143 .rt6i_metric = ~(u32) 0,
144 .rt6i_ref = ATOMIC_INIT(1),
145 };
146
147 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
148
149 static int ip6_pkt_prohibit(struct sk_buff *skb);
150 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
151
152 struct rt6_info ip6_prohibit_entry_template = {
153 .u = {
154 .dst = {
155 .__refcnt = ATOMIC_INIT(1),
156 .__use = 1,
157 .obsolete = -1,
158 .error = -EACCES,
159 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
160 .input = ip6_pkt_prohibit,
161 .output = ip6_pkt_prohibit_out,
162 }
163 },
164 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
165 .rt6i_metric = ~(u32) 0,
166 .rt6i_ref = ATOMIC_INIT(1),
167 };
168
169 static struct rt6_info ip6_blk_hole_entry_template = {
170 .u = {
171 .dst = {
172 .__refcnt = ATOMIC_INIT(1),
173 .__use = 1,
174 .obsolete = -1,
175 .error = -EINVAL,
176 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
177 .input = dst_discard,
178 .output = dst_discard,
179 }
180 },
181 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
182 .rt6i_metric = ~(u32) 0,
183 .rt6i_ref = ATOMIC_INIT(1),
184 };
185
186 #endif
187
188 /* allocate dst with ip6_dst_ops */
189 static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops)
190 {
191 return (struct rt6_info *)dst_alloc(ops);
192 }
193
194 static void ip6_dst_destroy(struct dst_entry *dst)
195 {
196 struct rt6_info *rt = (struct rt6_info *)dst;
197 struct inet6_dev *idev = rt->rt6i_idev;
198
199 if (idev != NULL) {
200 rt->rt6i_idev = NULL;
201 in6_dev_put(idev);
202 }
203 }
204
205 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
206 int how)
207 {
208 struct rt6_info *rt = (struct rt6_info *)dst;
209 struct inet6_dev *idev = rt->rt6i_idev;
210 struct net_device *loopback_dev =
211 dev_net(dev)->loopback_dev;
212
213 if (dev != loopback_dev && idev != NULL && idev->dev == dev) {
214 struct inet6_dev *loopback_idev =
215 in6_dev_get(loopback_dev);
216 if (loopback_idev != NULL) {
217 rt->rt6i_idev = loopback_idev;
218 in6_dev_put(idev);
219 }
220 }
221 }
222
223 static __inline__ int rt6_check_expired(const struct rt6_info *rt)
224 {
225 return (rt->rt6i_flags & RTF_EXPIRES &&
226 time_after(jiffies, rt->rt6i_expires));
227 }
228
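/*
 * Multicast and link-local destinations are scoped, so the outgoing
 * interface matters for them: callers request RT6_LOOKUP_F_IFACE when
 * this returns true (see ip6_route_input()/ip6_route_output() below).
 */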
229 static inline int rt6_need_strict(struct in6_addr *daddr)
230 {
231 return (ipv6_addr_type(daddr) &
232 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
233 }
234
235 /*
236 * Route lookup. The relevant table->tb6_lock is assumed to be held.
237 */
238
239 static inline struct rt6_info *rt6_device_match(struct net *net,
240 struct rt6_info *rt,
241 int oif,
242 int strict)
243 {
244 struct rt6_info *local = NULL;
245 struct rt6_info *sprt;
246
247 if (oif) {
248 for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) {
249 struct net_device *dev = sprt->rt6i_dev;
250 if (dev->ifindex == oif)
251 return sprt;
252 if (dev->flags & IFF_LOOPBACK) {
253 if (sprt->rt6i_idev == NULL ||
254 sprt->rt6i_idev->dev->ifindex != oif) {
255 if (strict && oif)
256 continue;
257 if (local && (!oif ||
258 local->rt6i_idev->dev->ifindex == oif))
259 continue;
260 }
261 local = sprt;
262 }
263 }
264
265 if (local)
266 return local;
267
268 if (strict)
269 return net->ipv6.ip6_null_entry;
270 }
271 return rt;
272 }
273
274 #ifdef CONFIG_IPV6_ROUTER_PREF
275 static void rt6_probe(struct rt6_info *rt)
276 {
277 struct neighbour *neigh = rt ? rt->rt6i_nexthop : NULL;
278 /*
279 * Okay, this does not seem to be the appropriate place
280 * for this, but we do need to check whether the router is
281 * really reachable; aka Router Reachability Probing.
282 *
283 * Router Reachability Probe MUST be rate-limited
284 * to no more than one per minute.
285 */
286 if (!neigh || (neigh->nud_state & NUD_VALID))
287 return;
288 read_lock_bh(&neigh->lock);
289 if (!(neigh->nud_state & NUD_VALID) &&
290 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
291 struct in6_addr mcaddr;
292 struct in6_addr *target;
293
294 neigh->updated = jiffies;
295 read_unlock_bh(&neigh->lock);
296
297 target = (struct in6_addr *)&neigh->primary_key;
298 addrconf_addr_solict_mult(target, &mcaddr);
299 ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
300 } else
301 read_unlock_bh(&neigh->lock);
302 }
303 #else
304 static inline void rt6_probe(struct rt6_info *rt)
305 {
306 return;
307 }
308 #endif
309
310 /*
311 * Default Router Selection (RFC 2461 6.3.6)
312 */
313 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
314 {
315 struct net_device *dev = rt->rt6i_dev;
316 if (!oif || dev->ifindex == oif)
317 return 2;
318 if ((dev->flags & IFF_LOOPBACK) &&
319 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
320 return 1;
321 return 0;
322 }
323
324 static inline int rt6_check_neigh(struct rt6_info *rt)
325 {
326 struct neighbour *neigh = rt->rt6i_nexthop;
327 int m;
328 if (rt->rt6i_flags & RTF_NONEXTHOP ||
329 !(rt->rt6i_flags & RTF_GATEWAY))
330 m = 1;
331 else if (neigh) {
332 read_lock_bh(&neigh->lock);
333 if (neigh->nud_state & NUD_VALID)
334 m = 2;
335 #ifdef CONFIG_IPV6_ROUTER_PREF
336 else if (neigh->nud_state & NUD_FAILED)
337 m = 0;
338 #endif
339 else
340 m = 1;
341 read_unlock_bh(&neigh->lock);
342 } else
343 m = 0;
344 return m;
345 }
346
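/*
 * Combined route score: bits 0-1 come from rt6_check_dev() (device
 * match) and, with CONFIG_IPV6_ROUTER_PREF, the decoded router
 * preference is or'ed in above them (<< 2).  Neighbour state from
 * rt6_check_neigh() is not added to the score; it only disqualifies a
 * route when RT6_LOOKUP_F_REACHABLE is requested.
 */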
347 static int rt6_score_route(struct rt6_info *rt, int oif,
348 int strict)
349 {
350 int m, n;
351
352 m = rt6_check_dev(rt, oif);
353 if (!m && (strict & RT6_LOOKUP_F_IFACE))
354 return -1;
355 #ifdef CONFIG_IPV6_ROUTER_PREF
356 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
357 #endif
358 n = rt6_check_neigh(rt);
359 if (!n && (strict & RT6_LOOKUP_F_REACHABLE))
360 return -1;
361 return m;
362 }
363
364 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
365 int *mpri, struct rt6_info *match)
366 {
367 int m;
368
369 if (rt6_check_expired(rt))
370 goto out;
371
372 m = rt6_score_route(rt, oif, strict);
373 if (m < 0)
374 goto out;
375
376 if (m > *mpri) {
377 if (strict & RT6_LOOKUP_F_REACHABLE)
378 rt6_probe(match);
379 *mpri = m;
380 match = rt;
381 } else if (strict & RT6_LOOKUP_F_REACHABLE) {
382 rt6_probe(rt);
383 }
384
385 out:
386 return match;
387 }
388
389 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
390 struct rt6_info *rr_head,
391 u32 metric, int oif, int strict)
392 {
393 struct rt6_info *rt, *match;
394 int mpri = -1;
395
396 match = NULL;
397 for (rt = rr_head; rt && rt->rt6i_metric == metric;
398 rt = rt->u.dst.rt6_next)
399 match = find_match(rt, oif, strict, &mpri, match);
400 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
401 rt = rt->u.dst.rt6_next)
402 match = find_match(rt, oif, strict, &mpri, match);
403
404 return match;
405 }
406
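/*
 * Pick the best-scoring route among the same-metric siblings starting
 * at fn->rr_ptr.  If nothing matched and RT6_LOOKUP_F_REACHABLE was
 * requested, advance fn->rr_ptr so the next lookup round-robins to a
 * different candidate (see "reworked default router selection" above).
 */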
407 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
408 {
409 struct rt6_info *match, *rt0;
410 struct net *net;
411
412 RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
413 __func__, fn->leaf, oif);
414
415 rt0 = fn->rr_ptr;
416 if (!rt0)
417 fn->rr_ptr = rt0 = fn->leaf;
418
419 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
420
421 if (!match &&
422 (strict & RT6_LOOKUP_F_REACHABLE)) {
423 struct rt6_info *next = rt0->u.dst.rt6_next;
424
425 /* no entries matched; do round-robin */
426 if (!next || next->rt6i_metric != rt0->rt6i_metric)
427 next = fn->leaf;
428
429 if (next != rt0)
430 fn->rr_ptr = next;
431 }
432
433 RT6_TRACE("%s() => %p\n",
434 __func__, match);
435
436 net = dev_net(rt0->rt6i_dev);
437 return (match ? match : net->ipv6.ip6_null_entry);
438 }
439
440 #ifdef CONFIG_IPV6_ROUTE_INFO
441 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
442 struct in6_addr *gwaddr)
443 {
444 struct net *net = dev_net(dev);
445 struct route_info *rinfo = (struct route_info *) opt;
446 struct in6_addr prefix_buf, *prefix;
447 unsigned int pref;
448 u32 lifetime;
449 struct rt6_info *rt;
450
451 if (len < sizeof(struct route_info)) {
452 return -EINVAL;
453 }
454
455 /* Sanity check for prefix_len and length */
456 if (rinfo->length > 3) {
457 return -EINVAL;
458 } else if (rinfo->prefix_len > 128) {
459 return -EINVAL;
460 } else if (rinfo->prefix_len > 64) {
461 if (rinfo->length < 2) {
462 return -EINVAL;
463 }
464 } else if (rinfo->prefix_len > 0) {
465 if (rinfo->length < 1) {
466 return -EINVAL;
467 }
468 }
469
470 pref = rinfo->route_pref;
471 if (pref == ICMPV6_ROUTER_PREF_INVALID)
472 pref = ICMPV6_ROUTER_PREF_MEDIUM;
473
474 lifetime = ntohl(rinfo->lifetime);
475 if (lifetime == 0xffffffff) {
476 /* infinity */
477 } else if (lifetime > 0x7fffffff/HZ) {
478 /* Avoid arithmetic overflow */
479 lifetime = 0x7fffffff/HZ - 1;
480 }
481
482 if (rinfo->length == 3)
483 prefix = (struct in6_addr *)rinfo->prefix;
484 else {
485 /* this function is safe */
486 ipv6_addr_prefix(&prefix_buf,
487 (struct in6_addr *)rinfo->prefix,
488 rinfo->prefix_len);
489 prefix = &prefix_buf;
490 }
491
492 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
493 dev->ifindex);
494
495 if (rt && !lifetime) {
496 ip6_del_rt(rt);
497 rt = NULL;
498 }
499
500 if (!rt && lifetime)
501 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
502 pref);
503 else if (rt)
504 rt->rt6i_flags = RTF_ROUTEINFO |
505 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
506
507 if (rt) {
508 if (lifetime == 0xffffffff) {
509 rt->rt6i_flags &= ~RTF_EXPIRES;
510 } else {
511 rt->rt6i_expires = jiffies + HZ * lifetime;
512 rt->rt6i_flags |= RTF_EXPIRES;
513 }
514 dst_release(&rt->u.dst);
515 }
516 return 0;
517 }
518 #endif
519
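/*
 * BACKTRACK() is expanded inside the lookup functions below; it relies
 * on the caller providing the local variables 'rt' and 'fn' and the
 * labels 'restart' and 'out'.  When the match is the null entry it
 * walks back up the tree (descending into source-routed subtrees via
 * FIB6_SUBTREE()) until a node carrying routing info is found, then
 * jumps back to 'restart'; at the tree root it gives up via 'out'.
 */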
520 #define BACKTRACK(__net, saddr) \
521 do { \
522 if (rt == __net->ipv6.ip6_null_entry) { \
523 struct fib6_node *pn; \
524 while (1) { \
525 if (fn->fn_flags & RTN_TL_ROOT) \
526 goto out; \
527 pn = fn->parent; \
528 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
529 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
530 else \
531 fn = pn; \
532 if (fn->fn_flags & RTN_RTINFO) \
533 goto restart; \
534 } \
535 } \
536 } while(0)
537
538 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
539 struct fib6_table *table,
540 struct flowi *fl, int flags)
541 {
542 struct fib6_node *fn;
543 struct rt6_info *rt;
544
545 read_lock_bh(&table->tb6_lock);
546 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
547 restart:
548 rt = fn->leaf;
549 rt = rt6_device_match(net, rt, fl->oif, flags);
550 BACKTRACK(net, &fl->fl6_src);
551 out:
552 dst_use(&rt->u.dst, jiffies);
553 read_unlock_bh(&table->tb6_lock);
554 return rt;
555
556 }
557
558 struct rt6_info *rt6_lookup(struct net *net, struct in6_addr *daddr,
559 struct in6_addr *saddr, int oif, int strict)
560 {
561 struct flowi fl = {
562 .oif = oif,
563 .nl_u = {
564 .ip6_u = {
565 .daddr = *daddr,
566 },
567 },
568 };
569 struct dst_entry *dst;
570 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
571
572 if (saddr) {
573 memcpy(&fl.fl6_src, saddr, sizeof(*saddr));
574 flags |= RT6_LOOKUP_F_HAS_SADDR;
575 }
576
577 dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_lookup);
578 if (dst->error == 0)
579 return (struct rt6_info *) dst;
580
581 dst_release(dst);
582
583 return NULL;
584 }
585
586 EXPORT_SYMBOL(rt6_lookup);
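/*
 * Illustrative use only (not part of this file): a caller resolving a
 * route to daddr on any interface would do roughly
 *
 *	rt = rt6_lookup(net, daddr, NULL, 0, 0);
 *	if (rt) {
 *		... inspect rt->rt6i_dev, rt->rt6i_gateway ...
 *		dst_release(&rt->u.dst);
 *	}
 *
 * The entry is returned with a reference taken (dst_use() in
 * ip6_pol_route_lookup()), so it must be dropped with dst_release();
 * rt6_pmtu_discovery() below follows exactly this pattern.
 */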
587
588 /* ip6_ins_rt is called with FREE table->tb6_lock.
589 It takes a new route entry; if the addition fails for any reason the
590 route is freed. In any case, if the caller does not hold a reference,
591 it may be destroyed.
592 */
593
594 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
595 {
596 int err;
597 struct fib6_table *table;
598
599 table = rt->rt6i_table;
600 write_lock_bh(&table->tb6_lock);
601 err = fib6_add(&table->tb6_root, rt, info);
602 write_unlock_bh(&table->tb6_lock);
603
604 return err;
605 }
606
607 int ip6_ins_rt(struct rt6_info *rt)
608 {
609 struct nl_info info = {
610 .nl_net = dev_net(rt->rt6i_dev),
611 };
612 return __ip6_ins_rt(rt, &info);
613 }
614
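/*
 * Copy-on-write: clone ort into a /128 RTF_CACHE host route bound to
 * daddr (marked RTF_ANYCAST when daddr equals the base address of a
 * non-host prefix) and resolve a neighbour entry for the next hop.
 */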
615 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *daddr,
616 struct in6_addr *saddr)
617 {
618 struct rt6_info *rt;
619
620 /*
621 * Clone the route.
622 */
623
624 rt = ip6_rt_copy(ort);
625
626 if (rt) {
627 if (!(rt->rt6i_flags&RTF_GATEWAY)) {
628 if (rt->rt6i_dst.plen != 128 &&
629 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr))
630 rt->rt6i_flags |= RTF_ANYCAST;
631 ipv6_addr_copy(&rt->rt6i_gateway, daddr);
632 }
633
634 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
635 rt->rt6i_dst.plen = 128;
636 rt->rt6i_flags |= RTF_CACHE;
637 rt->u.dst.flags |= DST_HOST;
638
639 #ifdef CONFIG_IPV6_SUBTREES
640 if (rt->rt6i_src.plen && saddr) {
641 ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
642 rt->rt6i_src.plen = 128;
643 }
644 #endif
645
646 rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
647
648 }
649
650 return rt;
651 }
652
653 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *daddr)
654 {
655 struct rt6_info *rt = ip6_rt_copy(ort);
656 if (rt) {
657 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
658 rt->rt6i_dst.plen = 128;
659 rt->rt6i_flags |= RTF_CACHE;
660 rt->u.dst.flags |= DST_HOST;
661 rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
662 }
663 return rt;
664 }
665
666 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
667 struct flowi *fl, int flags)
668 {
669 struct fib6_node *fn;
670 struct rt6_info *rt, *nrt;
671 int strict = 0;
672 int attempts = 3;
673 int err;
674 int reachable = ipv6_devconf.forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
675
676 strict |= flags & RT6_LOOKUP_F_IFACE;
677
678 relookup:
679 read_lock_bh(&table->tb6_lock);
680
681 restart_2:
682 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
683
684 restart:
685 rt = rt6_select(fn, oif, strict | reachable);
686
687 BACKTRACK(net, &fl->fl6_src);
688 if (rt == net->ipv6.ip6_null_entry ||
689 rt->rt6i_flags & RTF_CACHE)
690 goto out;
691
692 dst_hold(&rt->u.dst);
693 read_unlock_bh(&table->tb6_lock);
694
695 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
696 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
697 else {
698 #if CLONE_OFFLINK_ROUTE
699 nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
700 #else
701 goto out2;
702 #endif
703 }
704
705 dst_release(&rt->u.dst);
706 rt = nrt ? : net->ipv6.ip6_null_entry;
707
708 dst_hold(&rt->u.dst);
709 if (nrt) {
710 err = ip6_ins_rt(nrt);
711 if (!err)
712 goto out2;
713 }
714
715 if (--attempts <= 0)
716 goto out2;
717
718 /*
719 * Race condition! In the gap while table->tb6_lock was
720 * released, someone could have inserted this route. Relookup.
721 */
722 dst_release(&rt->u.dst);
723 goto relookup;
724
725 out:
726 if (reachable) {
727 reachable = 0;
728 goto restart_2;
729 }
730 dst_hold(&rt->u.dst);
731 read_unlock_bh(&table->tb6_lock);
732 out2:
733 rt->u.dst.lastuse = jiffies;
734 rt->u.dst.__use++;
735
736 return rt;
737 }
738
739 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
740 struct flowi *fl, int flags)
741 {
742 return ip6_pol_route(net, table, fl->iif, fl, flags);
743 }
744
745 void ip6_route_input(struct sk_buff *skb)
746 {
747 struct ipv6hdr *iph = ipv6_hdr(skb);
748 struct net *net = dev_net(skb->dev);
749 int flags = RT6_LOOKUP_F_HAS_SADDR;
750 struct flowi fl = {
751 .iif = skb->dev->ifindex,
752 .nl_u = {
753 .ip6_u = {
754 .daddr = iph->daddr,
755 .saddr = iph->saddr,
756 .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
757 },
758 },
759 .mark = skb->mark,
760 .proto = iph->nexthdr,
761 };
762
763 if (rt6_need_strict(&iph->daddr))
764 flags |= RT6_LOOKUP_F_IFACE;
765
766 skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input);
767 }
768
769 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
770 struct flowi *fl, int flags)
771 {
772 return ip6_pol_route(net, table, fl->oif, fl, flags);
773 }
774
775 struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
776 struct flowi *fl)
777 {
778 int flags = 0;
779
780 if (rt6_need_strict(&fl->fl6_dst))
781 flags |= RT6_LOOKUP_F_IFACE;
782
783 if (!ipv6_addr_any(&fl->fl6_src))
784 flags |= RT6_LOOKUP_F_HAS_SADDR;
785 else if (sk) {
786 unsigned int prefs = inet6_sk(sk)->srcprefs;
787 if (prefs & IPV6_PREFER_SRC_TMP)
788 flags |= RT6_LOOKUP_F_SRCPREF_TMP;
789 if (prefs & IPV6_PREFER_SRC_PUBLIC)
790 flags |= RT6_LOOKUP_F_SRCPREF_PUBLIC;
791 if (prefs & IPV6_PREFER_SRC_COA)
792 flags |= RT6_LOOKUP_F_SRCPREF_COA;
793 }
794
795 return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output);
796 }
797
798 EXPORT_SYMBOL(ip6_route_output);
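/*
 * Illustrative use only: callers on the output path typically fill a
 * flow key, call this, and check dst->error before use, e.g.
 *
 *	struct flowi fl = { .oif = oif };
 *	ipv6_addr_copy(&fl.fl6_dst, daddr);
 *	dst = ip6_route_output(net, sk, &fl);
 *	err = dst->error;
 *	if (err)
 *		dst_release(dst);
 *
 * Even a failed lookup returns a held dst (the null/reject entry with
 * dst->error set), so releasing it is always the caller's job.
 */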
799
800 int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl)
801 {
802 struct rt6_info *ort = (struct rt6_info *) *dstp;
803 struct rt6_info *rt = (struct rt6_info *)
804 dst_alloc(&ip6_dst_blackhole_ops);
805 struct dst_entry *new = NULL;
806
807 if (rt) {
808 new = &rt->u.dst;
809
810 atomic_set(&new->__refcnt, 1);
811 new->__use = 1;
812 new->input = dst_discard;
813 new->output = dst_discard;
814
815 memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
816 new->dev = ort->u.dst.dev;
817 if (new->dev)
818 dev_hold(new->dev);
819 rt->rt6i_idev = ort->rt6i_idev;
820 if (rt->rt6i_idev)
821 in6_dev_hold(rt->rt6i_idev);
822 rt->rt6i_expires = 0;
823
824 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
825 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
826 rt->rt6i_metric = 0;
827
828 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
829 #ifdef CONFIG_IPV6_SUBTREES
830 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
831 #endif
832
833 dst_free(new);
834 }
835
836 dst_release(*dstp);
837 *dstp = new;
838 return (new ? 0 : -ENOMEM);
839 }
840 EXPORT_SYMBOL_GPL(ip6_dst_blackhole);
841
842 /*
843 * Destination cache support functions
844 */
845
846 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
847 {
848 struct rt6_info *rt;
849
850 rt = (struct rt6_info *) dst;
851
852 if (rt && rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
853 return dst;
854
855 return NULL;
856 }
857
858 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
859 {
860 struct rt6_info *rt = (struct rt6_info *) dst;
861
862 if (rt) {
863 if (rt->rt6i_flags & RTF_CACHE)
864 ip6_del_rt(rt);
865 else
866 dst_release(dst);
867 }
868 return NULL;
869 }
870
871 static void ip6_link_failure(struct sk_buff *skb)
872 {
873 struct rt6_info *rt;
874
875 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
876
877 rt = (struct rt6_info *) skb->dst;
878 if (rt) {
879 if (rt->rt6i_flags&RTF_CACHE) {
880 dst_set_expires(&rt->u.dst, 0);
881 rt->rt6i_flags |= RTF_EXPIRES;
882 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
883 rt->rt6i_node->fn_sernum = -1;
884 }
885 }
886
887 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
888 {
889 struct rt6_info *rt6 = (struct rt6_info*)dst;
890
891 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
892 rt6->rt6i_flags |= RTF_MODIFIED;
893 if (mtu < IPV6_MIN_MTU) {
894 mtu = IPV6_MIN_MTU;
895 dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
896 }
897 dst->metrics[RTAX_MTU-1] = mtu;
898 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
899 }
900 }
901
902 static int ipv6_get_mtu(struct net_device *dev);
903
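/*
 * Worked example: with a 1500 byte link MTU the advertised MSS is
 * 1500 - 40 (IPv6 header) - 20 (TCP header) = 1440, unless that would
 * fall below the ip6_rt_min_advmss sysctl.
 */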
904 static inline unsigned int ipv6_advmss(struct net *net, unsigned int mtu)
905 {
906 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
907
908 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
909 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
910
911 /*
912 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
913 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
914 * IPV6_MAXPLEN is also valid and means: "any MSS,
915 * rely only on pmtu discovery"
916 */
917 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
918 mtu = IPV6_MAXPLEN;
919 return mtu;
920 }
921
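/*
 * dst entries created by icmp6_dst_alloc() are never inserted into the
 * FIB, so the normal tree walkers cannot reclaim them; instead they are
 * chained on this list and reaped by icmp6_dst_gc() once their
 * refcount drops to zero.
 */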
922 static struct dst_entry *icmp6_dst_gc_list;
923 static DEFINE_SPINLOCK(icmp6_dst_lock);
924
925 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
926 struct neighbour *neigh,
927 struct in6_addr *addr)
928 {
929 struct rt6_info *rt;
930 struct inet6_dev *idev = in6_dev_get(dev);
931 struct net *net = dev_net(dev);
932
933 if (unlikely(idev == NULL))
934 return NULL;
935
936 rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
937 if (unlikely(rt == NULL)) {
938 in6_dev_put(idev);
939 goto out;
940 }
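/*
 * Note: this error path reaches the 'out' label with rt == NULL; the
 * final 'return &rt->u.dst' then yields NULL only because u.dst is the
 * first member of struct rt6_info, which is what callers check for.
 */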
941
942 dev_hold(dev);
943 if (neigh)
944 neigh_hold(neigh);
945 else
946 neigh = ndisc_get_neigh(dev, addr);
947
948 rt->rt6i_dev = dev;
949 rt->rt6i_idev = idev;
950 rt->rt6i_nexthop = neigh;
951 atomic_set(&rt->u.dst.__refcnt, 1);
952 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255;
953 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
954 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
955 rt->u.dst.output = ip6_output;
956
957 #if 0 /* there's no chance to use these for ndisc */
958 rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
959 ? DST_HOST
960 : 0;
961 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
962 rt->rt6i_dst.plen = 128;
963 #endif
964
965 spin_lock_bh(&icmp6_dst_lock);
966 rt->u.dst.next = icmp6_dst_gc_list;
967 icmp6_dst_gc_list = &rt->u.dst;
968 spin_unlock_bh(&icmp6_dst_lock);
969
970 fib6_force_start_gc(net);
971
972 out:
973 return &rt->u.dst;
974 }
975
976 int icmp6_dst_gc(int *more)
977 {
978 struct dst_entry *dst, *next, **pprev;
979 int freed;
980
981 next = NULL;
982 freed = 0;
983
984 spin_lock_bh(&icmp6_dst_lock);
985 pprev = &icmp6_dst_gc_list;
986
987 while ((dst = *pprev) != NULL) {
988 if (!atomic_read(&dst->__refcnt)) {
989 *pprev = dst->next;
990 dst_free(dst);
991 freed++;
992 } else {
993 pprev = &dst->next;
994 (*more)++;
995 }
996 }
997
998 spin_unlock_bh(&icmp6_dst_lock);
999
1000 return freed;
1001 }
1002
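/*
 * GC heuristic: a run is skipped while the minimum interval has not
 * elapsed and the entry count stays below ip6_rt_max_size.
 * ip6_rt_gc_expire is the value handed to fib6_run_gc() (used there as
 * the idle-age threshold for unused cached clones); it decays by
 * 1/2^ip6_rt_gc_elasticity on every call, so sustained pressure makes
 * collection progressively more aggressive, and it is reset to half of
 * ip6_rt_gc_timeout once the entry count drops below gc_thresh.
 */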
1003 static int ip6_dst_gc(struct dst_ops *ops)
1004 {
1005 unsigned long now = jiffies;
1006 struct net *net = ops->dst_net;
1007 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1008 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1009 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1010 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1011 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1012
1013 if (time_after(rt_last_gc + rt_min_interval, now) &&
1014 atomic_read(&ops->entries) <= rt_max_size)
1015 goto out;
1016
1017 net->ipv6.ip6_rt_gc_expire++;
1018 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
1019 net->ipv6.ip6_rt_last_gc = now;
1020 if (atomic_read(&ops->entries) < ops->gc_thresh)
1021 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1022 out:
1023 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1024 return (atomic_read(&ops->entries) > rt_max_size);
1025 }
1026
1027 /* Clean host part of a prefix. Not necessary in radix tree,
1028 but results in cleaner routing tables.
1029
1030 Remove this only when everything is known to work!
1031 */
1032
1033 static int ipv6_get_mtu(struct net_device *dev)
1034 {
1035 int mtu = IPV6_MIN_MTU;
1036 struct inet6_dev *idev;
1037
1038 idev = in6_dev_get(dev);
1039 if (idev) {
1040 mtu = idev->cnf.mtu6;
1041 in6_dev_put(idev);
1042 }
1043 return mtu;
1044 }
1045
1046 int ip6_dst_hoplimit(struct dst_entry *dst)
1047 {
1048 int hoplimit = dst_metric(dst, RTAX_HOPLIMIT);
1049 if (hoplimit < 0) {
1050 struct net_device *dev = dst->dev;
1051 struct inet6_dev *idev = in6_dev_get(dev);
1052 if (idev) {
1053 hoplimit = idev->cnf.hop_limit;
1054 in6_dev_put(idev);
1055 } else
1056 hoplimit = ipv6_devconf.hop_limit;
1057 }
1058 return hoplimit;
1059 }
1060
1061 /*
1062 *
1063 */
1064
1065 int ip6_route_add(struct fib6_config *cfg)
1066 {
1067 int err;
1068 struct net *net = cfg->fc_nlinfo.nl_net;
1069 struct rt6_info *rt = NULL;
1070 struct net_device *dev = NULL;
1071 struct inet6_dev *idev = NULL;
1072 struct fib6_table *table;
1073 int addr_type;
1074
1075 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1076 return -EINVAL;
1077 #ifndef CONFIG_IPV6_SUBTREES
1078 if (cfg->fc_src_len)
1079 return -EINVAL;
1080 #endif
1081 if (cfg->fc_ifindex) {
1082 err = -ENODEV;
1083 dev = dev_get_by_index(net, cfg->fc_ifindex);
1084 if (!dev)
1085 goto out;
1086 idev = in6_dev_get(dev);
1087 if (!idev)
1088 goto out;
1089 }
1090
1091 if (cfg->fc_metric == 0)
1092 cfg->fc_metric = IP6_RT_PRIO_USER;
1093
1094 table = fib6_new_table(net, cfg->fc_table);
1095 if (table == NULL) {
1096 err = -ENOBUFS;
1097 goto out;
1098 }
1099
1100 rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
1101
1102 if (rt == NULL) {
1103 err = -ENOMEM;
1104 goto out;
1105 }
1106
1107 rt->u.dst.obsolete = -1;
1108 rt->rt6i_expires = jiffies + clock_t_to_jiffies(cfg->fc_expires);
1109
1110 if (cfg->fc_protocol == RTPROT_UNSPEC)
1111 cfg->fc_protocol = RTPROT_BOOT;
1112 rt->rt6i_protocol = cfg->fc_protocol;
1113
1114 addr_type = ipv6_addr_type(&cfg->fc_dst);
1115
1116 if (addr_type & IPV6_ADDR_MULTICAST)
1117 rt->u.dst.input = ip6_mc_input;
1118 else
1119 rt->u.dst.input = ip6_forward;
1120
1121 rt->u.dst.output = ip6_output;
1122
1123 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1124 rt->rt6i_dst.plen = cfg->fc_dst_len;
1125 if (rt->rt6i_dst.plen == 128)
1126 rt->u.dst.flags = DST_HOST;
1127
1128 #ifdef CONFIG_IPV6_SUBTREES
1129 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1130 rt->rt6i_src.plen = cfg->fc_src_len;
1131 #endif
1132
1133 rt->rt6i_metric = cfg->fc_metric;
1134
1135 /* We cannot add true routes via loopback here,
1136 they would result in kernel looping; promote them to reject routes
1137 */
1138 if ((cfg->fc_flags & RTF_REJECT) ||
1139 (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) {
1140 /* hold loopback dev/idev if we haven't done so. */
1141 if (dev != net->loopback_dev) {
1142 if (dev) {
1143 dev_put(dev);
1144 in6_dev_put(idev);
1145 }
1146 dev = net->loopback_dev;
1147 dev_hold(dev);
1148 idev = in6_dev_get(dev);
1149 if (!idev) {
1150 err = -ENODEV;
1151 goto out;
1152 }
1153 }
1154 rt->u.dst.output = ip6_pkt_discard_out;
1155 rt->u.dst.input = ip6_pkt_discard;
1156 rt->u.dst.error = -ENETUNREACH;
1157 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1158 goto install_route;
1159 }
1160
1161 if (cfg->fc_flags & RTF_GATEWAY) {
1162 struct in6_addr *gw_addr;
1163 int gwa_type;
1164
1165 gw_addr = &cfg->fc_gateway;
1166 ipv6_addr_copy(&rt->rt6i_gateway, gw_addr);
1167 gwa_type = ipv6_addr_type(gw_addr);
1168
1169 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1170 struct rt6_info *grt;
1171
1172 /* IPv6 strictly forbids using non-link-local
1173 addresses as nexthop addresses.
1174 Otherwise, the router will not be able to send redirects.
1175 That is a good rule, but in some (rare!) circumstances
1176 (SIT, PtP, NBMA NOARP links) it is handy to allow
1177 some exceptions. --ANK
1178 */
1179 err = -EINVAL;
1180 if (!(gwa_type&IPV6_ADDR_UNICAST))
1181 goto out;
1182
1183 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1184
1185 err = -EHOSTUNREACH;
1186 if (grt == NULL)
1187 goto out;
1188 if (dev) {
1189 if (dev != grt->rt6i_dev) {
1190 dst_release(&grt->u.dst);
1191 goto out;
1192 }
1193 } else {
1194 dev = grt->rt6i_dev;
1195 idev = grt->rt6i_idev;
1196 dev_hold(dev);
1197 in6_dev_hold(grt->rt6i_idev);
1198 }
1199 if (!(grt->rt6i_flags&RTF_GATEWAY))
1200 err = 0;
1201 dst_release(&grt->u.dst);
1202
1203 if (err)
1204 goto out;
1205 }
1206 err = -EINVAL;
1207 if (dev == NULL || (dev->flags&IFF_LOOPBACK))
1208 goto out;
1209 }
1210
1211 err = -ENODEV;
1212 if (dev == NULL)
1213 goto out;
1214
1215 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
1216 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
1217 if (IS_ERR(rt->rt6i_nexthop)) {
1218 err = PTR_ERR(rt->rt6i_nexthop);
1219 rt->rt6i_nexthop = NULL;
1220 goto out;
1221 }
1222 }
1223
1224 rt->rt6i_flags = cfg->fc_flags;
1225
1226 install_route:
1227 if (cfg->fc_mx) {
1228 struct nlattr *nla;
1229 int remaining;
1230
1231 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1232 int type = nla_type(nla);
1233
1234 if (type) {
1235 if (type > RTAX_MAX) {
1236 err = -EINVAL;
1237 goto out;
1238 }
1239
1240 rt->u.dst.metrics[type - 1] = nla_get_u32(nla);
1241 }
1242 }
1243 }
1244
1245 if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
1246 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1247 if (!rt->u.dst.metrics[RTAX_MTU-1])
1248 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
1249 if (!rt->u.dst.metrics[RTAX_ADVMSS-1])
1250 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
1251 rt->u.dst.dev = dev;
1252 rt->rt6i_idev = idev;
1253 rt->rt6i_table = table;
1254
1255 cfg->fc_nlinfo.nl_net = dev_net(dev);
1256
1257 return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1258
1259 out:
1260 if (dev)
1261 dev_put(dev);
1262 if (idev)
1263 in6_dev_put(idev);
1264 if (rt)
1265 dst_free(&rt->u.dst);
1266 return err;
1267 }
1268
1269 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1270 {
1271 int err;
1272 struct fib6_table *table;
1273 struct net *net = dev_net(rt->rt6i_dev);
1274
1275 if (rt == net->ipv6.ip6_null_entry)
1276 return -ENOENT;
1277
1278 table = rt->rt6i_table;
1279 write_lock_bh(&table->tb6_lock);
1280
1281 err = fib6_del(rt, info);
1282 dst_release(&rt->u.dst);
1283
1284 write_unlock_bh(&table->tb6_lock);
1285
1286 return err;
1287 }
1288
1289 int ip6_del_rt(struct rt6_info *rt)
1290 {
1291 struct nl_info info = {
1292 .nl_net = dev_net(rt->rt6i_dev),
1293 };
1294 return __ip6_del_rt(rt, &info);
1295 }
1296
1297 static int ip6_route_del(struct fib6_config *cfg)
1298 {
1299 struct fib6_table *table;
1300 struct fib6_node *fn;
1301 struct rt6_info *rt;
1302 int err = -ESRCH;
1303
1304 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1305 if (table == NULL)
1306 return err;
1307
1308 read_lock_bh(&table->tb6_lock);
1309
1310 fn = fib6_locate(&table->tb6_root,
1311 &cfg->fc_dst, cfg->fc_dst_len,
1312 &cfg->fc_src, cfg->fc_src_len);
1313
1314 if (fn) {
1315 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1316 if (cfg->fc_ifindex &&
1317 (rt->rt6i_dev == NULL ||
1318 rt->rt6i_dev->ifindex != cfg->fc_ifindex))
1319 continue;
1320 if (cfg->fc_flags & RTF_GATEWAY &&
1321 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1322 continue;
1323 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1324 continue;
1325 dst_hold(&rt->u.dst);
1326 read_unlock_bh(&table->tb6_lock);
1327
1328 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1329 }
1330 }
1331 read_unlock_bh(&table->tb6_lock);
1332
1333 return err;
1334 }
1335
1336 /*
1337 * Handle redirects
1338 */
1339 struct ip6rd_flowi {
1340 struct flowi fl;
1341 struct in6_addr gateway;
1342 };
1343
1344 static struct rt6_info *__ip6_route_redirect(struct net *net,
1345 struct fib6_table *table,
1346 struct flowi *fl,
1347 int flags)
1348 {
1349 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl;
1350 struct rt6_info *rt;
1351 struct fib6_node *fn;
1352
1353 /*
1354 * Get the "current" route for this destination and
1355 * check whether the redirect has come from the appropriate router.
1356 *
1357 * RFC 2461 specifies that redirects should only be
1358 * accepted if they come from the nexthop to the target.
1359 * Due to the way the routes are chosen, this notion
1360 * is a bit fuzzy and one might need to check all possible
1361 * routes.
1362 */
1363
1364 read_lock_bh(&table->tb6_lock);
1365 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
1366 restart:
1367 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1368 /*
1369 * Current route is on-link; redirect is always invalid.
1370 *
1371 * It seems the previous statement is not quite true: the target
1372 * could be a node that regards us as on-link (e.g. proxy ndisc),
1373 * but then the router serving it might decide that we should
1374 * know the truth 8)8) --ANK (980726).
1375 */
1376 if (rt6_check_expired(rt))
1377 continue;
1378 if (!(rt->rt6i_flags & RTF_GATEWAY))
1379 continue;
1380 if (fl->oif != rt->rt6i_dev->ifindex)
1381 continue;
1382 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1383 continue;
1384 break;
1385 }
1386
1387 if (!rt)
1388 rt = net->ipv6.ip6_null_entry;
1389 BACKTRACK(net, &fl->fl6_src);
1390 out:
1391 dst_hold(&rt->u.dst);
1392
1393 read_unlock_bh(&table->tb6_lock);
1394
1395 return rt;
1396 };
1397
1398 static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1399 struct in6_addr *src,
1400 struct in6_addr *gateway,
1401 struct net_device *dev)
1402 {
1403 int flags = RT6_LOOKUP_F_HAS_SADDR;
1404 struct net *net = dev_net(dev);
1405 struct ip6rd_flowi rdfl = {
1406 .fl = {
1407 .oif = dev->ifindex,
1408 .nl_u = {
1409 .ip6_u = {
1410 .daddr = *dest,
1411 .saddr = *src,
1412 },
1413 },
1414 },
1415 .gateway = *gateway,
1416 };
1417
1418 if (rt6_need_strict(dest))
1419 flags |= RT6_LOOKUP_F_IFACE;
1420
1421 return (struct rt6_info *)fib6_rule_lookup(net, (struct flowi *)&rdfl,
1422 flags, __ip6_route_redirect);
1423 }
1424
1425 void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
1426 struct in6_addr *saddr,
1427 struct neighbour *neigh, u8 *lladdr, int on_link)
1428 {
1429 struct rt6_info *rt, *nrt = NULL;
1430 struct netevent_redirect netevent;
1431 struct net *net = dev_net(neigh->dev);
1432
1433 rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
1434
1435 if (rt == net->ipv6.ip6_null_entry) {
1436 if (net_ratelimit())
1437 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
1438 "for redirect target\n");
1439 goto out;
1440 }
1441
1442 /*
1443 * We have finally decided to accept it.
1444 */
1445
1446 neigh_update(neigh, lladdr, NUD_STALE,
1447 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1448 NEIGH_UPDATE_F_OVERRIDE|
1449 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1450 NEIGH_UPDATE_F_ISROUTER))
1451 );
1452
1453 /*
1454 * Redirect received -> path was valid.
1455 * Look, redirects are sent only in response to data packets,
1456 * so that this nexthop apparently is reachable. --ANK
1457 */
1458 dst_confirm(&rt->u.dst);
1459
1460 /* Duplicate redirect: silently ignore. */
1461 if (neigh == rt->u.dst.neighbour)
1462 goto out;
1463
1464 nrt = ip6_rt_copy(rt);
1465 if (nrt == NULL)
1466 goto out;
1467
1468 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1469 if (on_link)
1470 nrt->rt6i_flags &= ~RTF_GATEWAY;
1471
1472 ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
1473 nrt->rt6i_dst.plen = 128;
1474 nrt->u.dst.flags |= DST_HOST;
1475
1476 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
1477 nrt->rt6i_nexthop = neigh_clone(neigh);
1478 /* Reset pmtu, it may be better */
1479 nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
1480 nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev),
1481 dst_mtu(&nrt->u.dst));
1482
1483 if (ip6_ins_rt(nrt))
1484 goto out;
1485
1486 netevent.old = &rt->u.dst;
1487 netevent.new = &nrt->u.dst;
1488 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1489
1490 if (rt->rt6i_flags&RTF_CACHE) {
1491 ip6_del_rt(rt);
1492 return;
1493 }
1494
1495 out:
1496 dst_release(&rt->u.dst);
1497 return;
1498 }
1499
1500 /*
1501 * Handle ICMP "packet too big" messages
1502 * i.e. Path MTU discovery
1503 */
1504
1505 void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1506 struct net_device *dev, u32 pmtu)
1507 {
1508 struct rt6_info *rt, *nrt;
1509 struct net *net = dev_net(dev);
1510 int allfrag = 0;
1511
1512 rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0);
1513 if (rt == NULL)
1514 return;
1515
1516 if (pmtu >= dst_mtu(&rt->u.dst))
1517 goto out;
1518
1519 if (pmtu < IPV6_MIN_MTU) {
1520 /*
1521 * According to RFC 2460, after a node receives a Packet Too Big
1522 * message reporting a PMTU below the IPv6 Minimum Link MTU (1280),
1523 * the PMTU is set to that minimum and a fragment header should
1524 * always be included.
1525 */
1526 pmtu = IPV6_MIN_MTU;
1527 allfrag = 1;
1528 }
1529
1530 /* New mtu received -> path was valid.
1531 Too Big messages are sent only in response to data packets,
1532 so this nexthop apparently is reachable. --ANK
1533 */
1534 dst_confirm(&rt->u.dst);
1535
1536 /* Host route. If it is static, it would be better
1537 not to override it but to add a new one, so that
1538 the old pmtu is restored automatically when the
1539 cache entry expires.
1540 */
1541 if (rt->rt6i_flags & RTF_CACHE) {
1542 rt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1543 if (allfrag)
1544 rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1545 dst_set_expires(&rt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1546 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
1547 goto out;
1548 }
1549
1550 /* Network route.
1551 Two cases are possible:
1552 1. It is a connected route. Action: COW.
1553 2. It is a gatewayed or NONEXTHOP route. Action: clone it.
1554 */
1555 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
1556 nrt = rt6_alloc_cow(rt, daddr, saddr);
1557 else
1558 nrt = rt6_alloc_clone(rt, daddr);
1559
1560 if (nrt) {
1561 nrt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1562 if (allfrag)
1563 nrt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1564
1565 /* According to RFC 1981, a PMTU increase should not be probed for
1566 * within 5 minutes; the recommended timer is 10 minutes.
1567 * Here the route expiration time is set to ip6_rt_mtu_expires,
1568 * which defaults to 10 minutes. After that the decreased pmtu
1569 * expires and PMTU increase detection happens automatically.
1570 */
1571 dst_set_expires(&nrt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1572 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
1573
1574 ip6_ins_rt(nrt);
1575 }
1576 out:
1577 dst_release(&rt->u.dst);
1578 }
1579
1580 /*
1581 * Misc support functions
1582 */
1583
1584 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1585 {
1586 struct net *net = dev_net(ort->rt6i_dev);
1587 struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
1588
1589 if (rt) {
1590 rt->u.dst.input = ort->u.dst.input;
1591 rt->u.dst.output = ort->u.dst.output;
1592
1593 memcpy(rt->u.dst.metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
1594 rt->u.dst.error = ort->u.dst.error;
1595 rt->u.dst.dev = ort->u.dst.dev;
1596 if (rt->u.dst.dev)
1597 dev_hold(rt->u.dst.dev);
1598 rt->rt6i_idev = ort->rt6i_idev;
1599 if (rt->rt6i_idev)
1600 in6_dev_hold(rt->rt6i_idev);
1601 rt->u.dst.lastuse = jiffies;
1602 rt->rt6i_expires = 0;
1603
1604 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
1605 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
1606 rt->rt6i_metric = 0;
1607
1608 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1609 #ifdef CONFIG_IPV6_SUBTREES
1610 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1611 #endif
1612 rt->rt6i_table = ort->rt6i_table;
1613 }
1614 return rt;
1615 }
1616
1617 #ifdef CONFIG_IPV6_ROUTE_INFO
1618 static struct rt6_info *rt6_get_route_info(struct net *net,
1619 struct in6_addr *prefix, int prefixlen,
1620 struct in6_addr *gwaddr, int ifindex)
1621 {
1622 struct fib6_node *fn;
1623 struct rt6_info *rt = NULL;
1624 struct fib6_table *table;
1625
1626 table = fib6_get_table(net, RT6_TABLE_INFO);
1627 if (table == NULL)
1628 return NULL;
1629
1630 write_lock_bh(&table->tb6_lock);
1631 fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
1632 if (!fn)
1633 goto out;
1634
1635 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1636 if (rt->rt6i_dev->ifindex != ifindex)
1637 continue;
1638 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1639 continue;
1640 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1641 continue;
1642 dst_hold(&rt->u.dst);
1643 break;
1644 }
1645 out:
1646 write_unlock_bh(&table->tb6_lock);
1647 return rt;
1648 }
1649
1650 static struct rt6_info *rt6_add_route_info(struct net *net,
1651 struct in6_addr *prefix, int prefixlen,
1652 struct in6_addr *gwaddr, int ifindex,
1653 unsigned pref)
1654 {
1655 struct fib6_config cfg = {
1656 .fc_table = RT6_TABLE_INFO,
1657 .fc_metric = IP6_RT_PRIO_USER,
1658 .fc_ifindex = ifindex,
1659 .fc_dst_len = prefixlen,
1660 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1661 RTF_UP | RTF_PREF(pref),
1662 .fc_nlinfo.pid = 0,
1663 .fc_nlinfo.nlh = NULL,
1664 .fc_nlinfo.nl_net = net,
1665 };
1666
1667 ipv6_addr_copy(&cfg.fc_dst, prefix);
1668 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1669
1670 /* We should treat it as a default route if prefix length is 0. */
1671 if (!prefixlen)
1672 cfg.fc_flags |= RTF_DEFAULT;
1673
1674 ip6_route_add(&cfg);
1675
1676 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
1677 }
1678 #endif
1679
1680 struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *dev)
1681 {
1682 struct rt6_info *rt;
1683 struct fib6_table *table;
1684
1685 table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
1686 if (table == NULL)
1687 return NULL;
1688
1689 write_lock_bh(&table->tb6_lock);
1690 for (rt = table->tb6_root.leaf; rt; rt=rt->u.dst.rt6_next) {
1691 if (dev == rt->rt6i_dev &&
1692 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1693 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1694 break;
1695 }
1696 if (rt)
1697 dst_hold(&rt->u.dst);
1698 write_unlock_bh(&table->tb6_lock);
1699 return rt;
1700 }
1701
1702 struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
1703 struct net_device *dev,
1704 unsigned int pref)
1705 {
1706 struct fib6_config cfg = {
1707 .fc_table = RT6_TABLE_DFLT,
1708 .fc_metric = IP6_RT_PRIO_USER,
1709 .fc_ifindex = dev->ifindex,
1710 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1711 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1712 .fc_nlinfo.pid = 0,
1713 .fc_nlinfo.nlh = NULL,
1714 .fc_nlinfo.nl_net = dev_net(dev),
1715 };
1716
1717 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1718
1719 ip6_route_add(&cfg);
1720
1721 return rt6_get_dflt_router(gwaddr, dev);
1722 }
1723
1724 void rt6_purge_dflt_routers(struct net *net)
1725 {
1726 struct rt6_info *rt;
1727 struct fib6_table *table;
1728
1729 /* NOTE: Keep consistent with rt6_get_dflt_router */
1730 table = fib6_get_table(net, RT6_TABLE_DFLT);
1731 if (table == NULL)
1732 return;
1733
1734 restart:
1735 read_lock_bh(&table->tb6_lock);
1736 for (rt = table->tb6_root.leaf; rt; rt = rt->u.dst.rt6_next) {
1737 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
1738 dst_hold(&rt->u.dst);
1739 read_unlock_bh(&table->tb6_lock);
1740 ip6_del_rt(rt);
1741 goto restart;
1742 }
1743 }
1744 read_unlock_bh(&table->tb6_lock);
1745 }
1746
1747 static void rtmsg_to_fib6_config(struct net *net,
1748 struct in6_rtmsg *rtmsg,
1749 struct fib6_config *cfg)
1750 {
1751 memset(cfg, 0, sizeof(*cfg));
1752
1753 cfg->fc_table = RT6_TABLE_MAIN;
1754 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
1755 cfg->fc_metric = rtmsg->rtmsg_metric;
1756 cfg->fc_expires = rtmsg->rtmsg_info;
1757 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
1758 cfg->fc_src_len = rtmsg->rtmsg_src_len;
1759 cfg->fc_flags = rtmsg->rtmsg_flags;
1760
1761 cfg->fc_nlinfo.nl_net = net;
1762
1763 ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
1764 ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
1765 ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
1766 }
1767
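/*
 * Illustrative use only: from userspace the legacy ioctl interface
 * looks roughly like
 *
 *	struct in6_rtmsg rtmsg = {
 *		.rtmsg_dst	= <destination prefix>,
 *		.rtmsg_dst_len	= 64,
 *		.rtmsg_gateway	= <gateway address>,
 *		.rtmsg_flags	= RTF_UP | RTF_GATEWAY,
 *		.rtmsg_ifindex	= if_nametoindex("eth0"),
 *		.rtmsg_metric	= 1,
 *	};
 *	ioctl(fd, SIOCADDRT, &rtmsg);	(fd is an AF_INET6 socket)
 *
 * which lands here and is translated by rtmsg_to_fib6_config() into a
 * struct fib6_config for ip6_route_add()/ip6_route_del().
 */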
1768 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1769 {
1770 struct fib6_config cfg;
1771 struct in6_rtmsg rtmsg;
1772 int err;
1773
1774 switch(cmd) {
1775 case SIOCADDRT: /* Add a route */
1776 case SIOCDELRT: /* Delete a route */
1777 if (!capable(CAP_NET_ADMIN))
1778 return -EPERM;
1779 err = copy_from_user(&rtmsg, arg,
1780 sizeof(struct in6_rtmsg));
1781 if (err)
1782 return -EFAULT;
1783
1784 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
1785
1786 rtnl_lock();
1787 switch (cmd) {
1788 case SIOCADDRT:
1789 err = ip6_route_add(&cfg);
1790 break;
1791 case SIOCDELRT:
1792 err = ip6_route_del(&cfg);
1793 break;
1794 default:
1795 err = -EINVAL;
1796 }
1797 rtnl_unlock();
1798
1799 return err;
1800 }
1801
1802 return -EINVAL;
1803 }
1804
1805 /*
1806 * Drop the packet on the floor
1807 */
1808
1809 static int ip6_pkt_drop(struct sk_buff *skb, int code, int ipstats_mib_noroutes)
1810 {
1811 int type;
1812 switch (ipstats_mib_noroutes) {
1813 case IPSTATS_MIB_INNOROUTES:
1814 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
1815 if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED) {
1816 IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INADDRERRORS);
1817 break;
1818 }
1819 /* FALLTHROUGH */
1820 case IPSTATS_MIB_OUTNOROUTES:
1821 IP6_INC_STATS(ip6_dst_idev(skb->dst), ipstats_mib_noroutes);
1822 break;
1823 }
1824 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0, skb->dev);
1825 kfree_skb(skb);
1826 return 0;
1827 }
1828
1829 static int ip6_pkt_discard(struct sk_buff *skb)
1830 {
1831 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
1832 }
1833
1834 static int ip6_pkt_discard_out(struct sk_buff *skb)
1835 {
1836 skb->dev = skb->dst->dev;
1837 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
1838 }
1839
1840 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
1841
1842 static int ip6_pkt_prohibit(struct sk_buff *skb)
1843 {
1844 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
1845 }
1846
1847 static int ip6_pkt_prohibit_out(struct sk_buff *skb)
1848 {
1849 skb->dev = skb->dst->dev;
1850 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
1851 }
1852
1853 #endif
1854
1855 /*
1856 * Allocate a dst for local (unicast / anycast) address.
1857 */
1858
1859 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1860 const struct in6_addr *addr,
1861 int anycast)
1862 {
1863 struct net *net = dev_net(idev->dev);
1864 struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
1865
1866 if (rt == NULL)
1867 return ERR_PTR(-ENOMEM);
1868
1869 dev_hold(net->loopback_dev);
1870 in6_dev_hold(idev);
1871
1872 rt->u.dst.flags = DST_HOST;
1873 rt->u.dst.input = ip6_input;
1874 rt->u.dst.output = ip6_output;
1875 rt->rt6i_dev = net->loopback_dev;
1876 rt->rt6i_idev = idev;
1877 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
1878 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
1879 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1880 rt->u.dst.obsolete = -1;
1881
1882 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
1883 if (anycast)
1884 rt->rt6i_flags |= RTF_ANYCAST;
1885 else
1886 rt->rt6i_flags |= RTF_LOCAL;
1887 rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
1888 if (rt->rt6i_nexthop == NULL) {
1889 dst_free(&rt->u.dst);
1890 return ERR_PTR(-ENOMEM);
1891 }
1892
1893 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1894 rt->rt6i_dst.plen = 128;
1895 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
1896
1897 atomic_set(&rt->u.dst.__refcnt, 1);
1898
1899 return rt;
1900 }
1901
1902 struct arg_dev_net {
1903 struct net_device *dev;
1904 struct net *net;
1905 };
1906
1907 static int fib6_ifdown(struct rt6_info *rt, void *arg)
1908 {
1909 struct net_device *dev = ((struct arg_dev_net *)arg)->dev;
1910 struct net *net = ((struct arg_dev_net *)arg)->net;
1911
1912 if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
1913 rt != net->ipv6.ip6_null_entry) {
1914 RT6_TRACE("deleted by ifdown %p\n", rt);
1915 return -1;
1916 }
1917 return 0;
1918 }
1919
1920 void rt6_ifdown(struct net *net, struct net_device *dev)
1921 {
1922 struct arg_dev_net adn = {
1923 .dev = dev,
1924 .net = net,
1925 };
1926
1927 fib6_clean_all(net, fib6_ifdown, 0, &adn);
1928 }
1929
1930 struct rt6_mtu_change_arg
1931 {
1932 struct net_device *dev;
1933 unsigned mtu;
1934 };
1935
1936 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
1937 {
1938 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
1939 struct inet6_dev *idev;
1940 struct net *net = dev_net(arg->dev);
1941
1942 /* In IPv6 pmtu discovery is not optional,
1943 so the RTAX_MTU lock cannot disable it.
1944 We still use this lock to block changes
1945 caused by addrconf/ndisc.
1946 */
1947
1948 idev = __in6_dev_get(arg->dev);
1949 if (idev == NULL)
1950 return 0;
1951
1952 /* After an administrative MTU increase there is no way to discover
1953 an IPv6 PMTU increase, so the PMTU has to be updated here.
1954 Since RFC 1981 doesn't cover administrative MTU increases,
1955 updating the PMTU on such an increase is a MUST (e.g. jumbo frames).
1956 */
1957 /*
1958 If the new MTU is less than the route PMTU, the new MTU will be the
1959 lowest MTU in the path; update the route PMTU to reflect the
1960 decrease. If the new MTU is greater than the route PMTU, and the
1961 old MTU was the lowest MTU in the path, update the route PMTU
1962 to reflect the increase. In that case, if another node's link still
1963 has the lowest MTU in the path, a Packet Too Big message will lead to
1964 PMTU discovery.
1965 */
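/*
 * Hypothetical example (assuming idev->cnf.mtu6 still holds the old
 * device MTU when this walker runs): raising a device from 1500 to
 * 9000 updates routes whose MTU equals 1500, while a route whose PMTU
 * was lowered to 1280 by a Packet Too Big message keeps that value.
 */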
1966 if (rt->rt6i_dev == arg->dev &&
1967 !dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
1968 (dst_mtu(&rt->u.dst) >= arg->mtu ||
1969 (dst_mtu(&rt->u.dst) < arg->mtu &&
1970 dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) {
1971 rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
1972 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu);
1973 }
1974 return 0;
1975 }
1976
1977 void rt6_mtu_change(struct net_device *dev, unsigned mtu)
1978 {
1979 struct rt6_mtu_change_arg arg = {
1980 .dev = dev,
1981 .mtu = mtu,
1982 };
1983
1984 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
1985 }
1986
1987 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
1988 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
1989 [RTA_OIF] = { .type = NLA_U32 },
1990 [RTA_IIF] = { .type = NLA_U32 },
1991 [RTA_PRIORITY] = { .type = NLA_U32 },
1992 [RTA_METRICS] = { .type = NLA_NESTED },
1993 };
1994
1995 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
1996 struct fib6_config *cfg)
1997 {
1998 struct rtmsg *rtm;
1999 struct nlattr *tb[RTA_MAX+1];
2000 int err;
2001
2002 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2003 if (err < 0)
2004 goto errout;
2005
2006 err = -EINVAL;
2007 rtm = nlmsg_data(nlh);
2008 memset(cfg, 0, sizeof(*cfg));
2009
2010 cfg->fc_table = rtm->rtm_table;
2011 cfg->fc_dst_len = rtm->rtm_dst_len;
2012 cfg->fc_src_len = rtm->rtm_src_len;
2013 cfg->fc_flags = RTF_UP;
2014 cfg->fc_protocol = rtm->rtm_protocol;
2015
2016 if (rtm->rtm_type == RTN_UNREACHABLE)
2017 cfg->fc_flags |= RTF_REJECT;
2018
2019 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
2020 cfg->fc_nlinfo.nlh = nlh;
2021 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2022
2023 if (tb[RTA_GATEWAY]) {
2024 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
2025 cfg->fc_flags |= RTF_GATEWAY;
2026 }
2027
2028 if (tb[RTA_DST]) {
2029 int plen = (rtm->rtm_dst_len + 7) >> 3;
2030
2031 if (nla_len(tb[RTA_DST]) < plen)
2032 goto errout;
2033
2034 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2035 }
2036
2037 if (tb[RTA_SRC]) {
2038 int plen = (rtm->rtm_src_len + 7) >> 3;
2039
2040 if (nla_len(tb[RTA_SRC]) < plen)
2041 goto errout;
2042
2043 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2044 }
2045
2046 if (tb[RTA_OIF])
2047 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2048
2049 if (tb[RTA_PRIORITY])
2050 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2051
2052 if (tb[RTA_METRICS]) {
2053 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2054 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2055 }
2056
2057 if (tb[RTA_TABLE])
2058 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2059
2060 err = 0;
2061 errout:
2062 return err;
2063 }
2064
2065 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2066 {
2067 struct fib6_config cfg;
2068 int err;
2069
2070 err = rtm_to_fib6_config(skb, nlh, &cfg);
2071 if (err < 0)
2072 return err;
2073
2074 return ip6_route_del(&cfg);
2075 }
2076
2077 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2078 {
2079 struct fib6_config cfg;
2080 int err;
2081
2082 err = rtm_to_fib6_config(skb, nlh, &cfg);
2083 if (err < 0)
2084 return err;
2085
2086 return ip6_route_add(&cfg);
2087 }
2088
2089 static inline size_t rt6_nlmsg_size(void)
2090 {
2091 return NLMSG_ALIGN(sizeof(struct rtmsg))
2092 + nla_total_size(16) /* RTA_SRC */
2093 + nla_total_size(16) /* RTA_DST */
2094 + nla_total_size(16) /* RTA_GATEWAY */
2095 + nla_total_size(16) /* RTA_PREFSRC */
2096 + nla_total_size(4) /* RTA_TABLE */
2097 + nla_total_size(4) /* RTA_IIF */
2098 + nla_total_size(4) /* RTA_OIF */
2099 + nla_total_size(4) /* RTA_PRIORITY */
2100 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2101 + nla_total_size(sizeof(struct rta_cacheinfo));
2102 }
2103
2104 static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
2105 struct in6_addr *dst, struct in6_addr *src,
2106 int iif, int type, u32 pid, u32 seq,
2107 int prefix, unsigned int flags)
2108 {
2109 struct rtmsg *rtm;
2110 struct nlmsghdr *nlh;
2111 long expires;
2112 u32 table;
2113
2114 if (prefix) { /* user wants prefix routes only */
2115 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2116 /* success since this is not a prefix route */
2117 return 1;
2118 }
2119 }
2120
2121 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
2122 if (nlh == NULL)
2123 return -EMSGSIZE;
2124
2125 rtm = nlmsg_data(nlh);
2126 rtm->rtm_family = AF_INET6;
2127 rtm->rtm_dst_len = rt->rt6i_dst.plen;
2128 rtm->rtm_src_len = rt->rt6i_src.plen;
2129 rtm->rtm_tos = 0;
2130 if (rt->rt6i_table)
2131 table = rt->rt6i_table->tb6_id;
2132 else
2133 table = RT6_TABLE_UNSPEC;
2134 rtm->rtm_table = table;
2135 NLA_PUT_U32(skb, RTA_TABLE, table);
2136 if (rt->rt6i_flags&RTF_REJECT)
2137 rtm->rtm_type = RTN_UNREACHABLE;
2138 else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
2139 rtm->rtm_type = RTN_LOCAL;
2140 else
2141 rtm->rtm_type = RTN_UNICAST;
2142 rtm->rtm_flags = 0;
2143 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2144 rtm->rtm_protocol = rt->rt6i_protocol;
2145 if (rt->rt6i_flags&RTF_DYNAMIC)
2146 rtm->rtm_protocol = RTPROT_REDIRECT;
2147 else if (rt->rt6i_flags & RTF_ADDRCONF)
2148 rtm->rtm_protocol = RTPROT_KERNEL;
2149 else if (rt->rt6i_flags&RTF_DEFAULT)
2150 rtm->rtm_protocol = RTPROT_RA;
2151
2152 if (rt->rt6i_flags&RTF_CACHE)
2153 rtm->rtm_flags |= RTM_F_CLONED;
2154
2155 if (dst) {
2156 NLA_PUT(skb, RTA_DST, 16, dst);
2157 rtm->rtm_dst_len = 128;
2158 } else if (rtm->rtm_dst_len)
2159 NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
2160 #ifdef CONFIG_IPV6_SUBTREES
2161 if (src) {
2162 NLA_PUT(skb, RTA_SRC, 16, src);
2163 rtm->rtm_src_len = 128;
2164 } else if (rtm->rtm_src_len)
2165 NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
2166 #endif
2167 if (iif)
2168 NLA_PUT_U32(skb, RTA_IIF, iif);
2169 else if (dst) {
2170 struct in6_addr saddr_buf;
2171 if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
2172 dst, 0, &saddr_buf) == 0)
2173 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2174 }
2175
2176 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
2177 goto nla_put_failure;
2178
2179 if (rt->u.dst.neighbour)
2180 NLA_PUT(skb, RTA_GATEWAY, 16, &rt->u.dst.neighbour->primary_key);
2181
2182 if (rt->u.dst.dev)
2183 NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
2184
2185 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
2186
2187 expires = rt->rt6i_expires ? rt->rt6i_expires - jiffies : 0;
2188 if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0,
2189 expires, rt->u.dst.error) < 0)
2190 goto nla_put_failure;
2191
2192 return nlmsg_end(skb, nlh);
2193
2194 nla_put_failure:
2195 nlmsg_cancel(skb, nlh);
2196 return -EMSGSIZE;
2197 }
2198
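/*
 * Per-route dump callback: emit one RTM_NEWROUTE entry, honouring the
 * RTM_F_PREFIX filter from the request header if one was supplied.
 */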
2199 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2200 {
2201 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2202 int prefix;
2203
2204 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2205 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2206 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2207 } else
2208 prefix = 0;
2209
2210 return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2211 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
2212 prefix, NLM_F_MULTI);
2213 }
2214
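/*
 * RTM_GETROUTE handler: look up the route for the given source,
 * destination and interface, and unicast the result to the requester.
 */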
2215 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
2216 {
2217 struct net *net = sock_net(in_skb->sk);
2218 struct nlattr *tb[RTA_MAX+1];
2219 struct rt6_info *rt;
2220 struct sk_buff *skb;
2221 struct rtmsg *rtm;
2222 struct flowi fl;
2223 int err, iif = 0;
2224
2225 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2226 if (err < 0)
2227 goto errout;
2228
2229 err = -EINVAL;
2230 memset(&fl, 0, sizeof(fl));
2231
2232 if (tb[RTA_SRC]) {
2233 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2234 goto errout;
2235
2236 ipv6_addr_copy(&fl.fl6_src, nla_data(tb[RTA_SRC]));
2237 }
2238
2239 if (tb[RTA_DST]) {
2240 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2241 goto errout;
2242
2243 ipv6_addr_copy(&fl.fl6_dst, nla_data(tb[RTA_DST]));
2244 }
2245
2246 if (tb[RTA_IIF])
2247 iif = nla_get_u32(tb[RTA_IIF]);
2248
2249 if (tb[RTA_OIF])
2250 fl.oif = nla_get_u32(tb[RTA_OIF]);
2251
2252 if (iif) {
2253 struct net_device *dev;
2254 dev = __dev_get_by_index(net, iif);
2255 if (!dev) {
2256 err = -ENODEV;
2257 goto errout;
2258 }
2259 }
2260
2261 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2262 if (skb == NULL) {
2263 err = -ENOBUFS;
2264 goto errout;
2265 }
2266
2267 /* Reserve room for dummy headers; this skb can pass
2268 through a good chunk of the routing engine.
2269 */
2270 skb_reset_mac_header(skb);
2271 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2272
2273 rt = (struct rt6_info *) ip6_route_output(net, NULL, &fl);
2274 skb->dst = &rt->u.dst;
2275
2276 err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
2277 RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
2278 nlh->nlmsg_seq, 0, 0);
2279 if (err < 0) {
2280 kfree_skb(skb);
2281 goto errout;
2282 }
2283
2284 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2285 errout:
2286 return err;
2287 }
2288
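/* Notify RTNLGRP_IPV6_ROUTE listeners about a route change. */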
2289 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2290 {
2291 struct sk_buff *skb;
2292 struct net *net = info->nl_net;
2293 u32 seq;
2294 int err;
2295
2296 err = -ENOBUFS;
2297 seq = info->nlh != NULL ? info->nlh->nlmsg_seq : 0;
2298
2299 skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2300 if (skb == NULL)
2301 goto errout;
2302
2303 err = rt6_fill_node(skb, rt, NULL, NULL, 0,
2304 event, info->pid, seq, 0, 0);
2305 if (err < 0) {
2306 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2307 WARN_ON(err == -EMSGSIZE);
2308 kfree_skb(skb);
2309 goto errout;
2310 }
2311 err = rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE,
2312 info->nlh, gfp_any());
2313 errout:
2314 if (err < 0)
2315 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2316 }
2317
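/*
 * When a namespace's loopback device is registered, attach the
 * null (and, with multiple tables, prohibit/blackhole) routes to it.
 */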
2318 static int ip6_route_dev_notify(struct notifier_block *this,
2319 unsigned long event, void *data)
2320 {
2321 struct net_device *dev = (struct net_device *)data;
2322 struct net *net = dev_net(dev);
2323
2324 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2325 net->ipv6.ip6_null_entry->u.dst.dev = dev;
2326 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2327 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2328 net->ipv6.ip6_prohibit_entry->u.dst.dev = dev;
2329 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2330 net->ipv6.ip6_blk_hole_entry->u.dst.dev = dev;
2331 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2332 #endif
2333 }
2334
2335 return NOTIFY_OK;
2336 }
2337
2338 /*
2339 * /proc
2340 */
2341
2342 #ifdef CONFIG_PROC_FS
2343
2344 #define RT6_INFO_LEN (32 + 4 + 32 + 4 + 32 + 40 + 5 + 1)
2345
2346 struct rt6_proc_arg
2347 {
2348 char *buffer;
2349 int offset;
2350 int length;
2351 int skip;
2352 int len;
2353 };
2354
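/* Emit one route in /proc/net/ipv6_route format. */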
2355 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2356 {
2357 struct seq_file *m = p_arg;
2358
2359 seq_printf(m, NIP6_SEQFMT " %02x ", NIP6(rt->rt6i_dst.addr),
2360 rt->rt6i_dst.plen);
2361
2362 #ifdef CONFIG_IPV6_SUBTREES
2363 seq_printf(m, NIP6_SEQFMT " %02x ", NIP6(rt->rt6i_src.addr),
2364 rt->rt6i_src.plen);
2365 #else
2366 seq_puts(m, "00000000000000000000000000000000 00 ");
2367 #endif
2368
2369 if (rt->rt6i_nexthop) {
2370 seq_printf(m, NIP6_SEQFMT,
2371 NIP6(*((struct in6_addr *)rt->rt6i_nexthop->primary_key)));
2372 } else {
2373 seq_puts(m, "00000000000000000000000000000000");
2374 }
2375 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2376 rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt),
2377 rt->u.dst.__use, rt->rt6i_flags,
2378 rt->rt6i_dev ? rt->rt6i_dev->name : "");
2379 return 0;
2380 }
2381
2382 static int ipv6_route_show(struct seq_file *m, void *v)
2383 {
2384 struct net *net = (struct net *)m->private;
2385 fib6_clean_all(net, rt6_info_route, 0, m);
2386 return 0;
2387 }
2388
2389 static int ipv6_route_open(struct inode *inode, struct file *file)
2390 {
2391 int err;
2392 struct net *net = get_proc_net(inode);
2393 if (!net)
2394 return -ENXIO;
2395
2396 err = single_open(file, ipv6_route_show, net);
2397 if (err < 0) {
2398 put_net(net);
2399 return err;
2400 }
2401
2402 return 0;
2403 }
2404
2405 static int ipv6_route_release(struct inode *inode, struct file *file)
2406 {
2407 struct seq_file *seq = file->private_data;
2408 struct net *net = seq->private;
2409 put_net(net);
2410 return single_release(inode, file);
2411 }
2412
2413 static const struct file_operations ipv6_route_proc_fops = {
2414 .owner = THIS_MODULE,
2415 .open = ipv6_route_open,
2416 .read = seq_read,
2417 .llseek = seq_lseek,
2418 .release = ipv6_route_release,
2419 };
2420
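/* /proc/net/rt6_stats: FIB node and dst cache counters. */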
2421 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2422 {
2423 struct net *net = (struct net *)seq->private;
2424 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2425 net->ipv6.rt6_stats->fib_nodes,
2426 net->ipv6.rt6_stats->fib_route_nodes,
2427 net->ipv6.rt6_stats->fib_rt_alloc,
2428 net->ipv6.rt6_stats->fib_rt_entries,
2429 net->ipv6.rt6_stats->fib_rt_cache,
2430 atomic_read(&net->ipv6.ip6_dst_ops->entries),
2431 net->ipv6.rt6_stats->fib_discarded_routes);
2432
2433 return 0;
2434 }
2435
2436 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2437 {
2438 int err;
2439 struct net *net = get_proc_net(inode);
2440 if (!net)
2441 return -ENXIO;
2442
2443 err = single_open(file, rt6_stats_seq_show, net);
2444 if (err < 0) {
2445 put_net(net);
2446 return err;
2447 }
2448
2449 return 0;
2450 }
2451
2452 static int rt6_stats_seq_release(struct inode *inode, struct file *file)
2453 {
2454 struct seq_file *seq = file->private_data;
2455 struct net *net = (struct net *)seq->private;
2456 put_net(net);
2457 return single_release(inode, file);
2458 }
2459
2460 static const struct file_operations rt6_stats_seq_fops = {
2461 .owner = THIS_MODULE,
2462 .open = rt6_stats_seq_open,
2463 .read = seq_read,
2464 .llseek = seq_lseek,
2465 .release = rt6_stats_seq_release,
2466 };
2467 #endif /* CONFIG_PROC_FS */
2468
2469 #ifdef CONFIG_SYSCTL
2470
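/*
 * "flush" sysctl: writing a value runs a garbage-collection pass over
 * the routing cache, using the written value as the gc delay.
 */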
2471 static
2472 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
2473 void __user *buffer, size_t *lenp, loff_t *ppos)
2474 {
2475 struct net *net = current->nsproxy->net_ns;
2476 int delay;
2477 if (!write)
2478 return -EINVAL;
2479 proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
2480 delay = net->ipv6.sysctl.flush_delay;
2481 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2482 return 0;
2483 }
2484
2485 ctl_table ipv6_route_table_template[] = {
2486 {
2487 .procname = "flush",
2488 .data = &init_net.ipv6.sysctl.flush_delay,
2489 .maxlen = sizeof(int),
2490 .mode = 0200,
2491 .proc_handler = &ipv6_sysctl_rtcache_flush
2492 },
2493 {
2494 .ctl_name = NET_IPV6_ROUTE_GC_THRESH,
2495 .procname = "gc_thresh",
2496 .data = &ip6_dst_ops_template.gc_thresh,
2497 .maxlen = sizeof(int),
2498 .mode = 0644,
2499 .proc_handler = &proc_dointvec,
2500 },
2501 {
2502 .ctl_name = NET_IPV6_ROUTE_MAX_SIZE,
2503 .procname = "max_size",
2504 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
2505 .maxlen = sizeof(int),
2506 .mode = 0644,
2507 .proc_handler = &proc_dointvec,
2508 },
2509 {
2510 .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL,
2511 .procname = "gc_min_interval",
2512 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2513 .maxlen = sizeof(int),
2514 .mode = 0644,
2515 .proc_handler = &proc_dointvec_jiffies,
2516 .strategy = &sysctl_jiffies,
2517 },
2518 {
2519 .ctl_name = NET_IPV6_ROUTE_GC_TIMEOUT,
2520 .procname = "gc_timeout",
2521 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
2522 .maxlen = sizeof(int),
2523 .mode = 0644,
2524 .proc_handler = &proc_dointvec_jiffies,
2525 .strategy = &sysctl_jiffies,
2526 },
2527 {
2528 .ctl_name = NET_IPV6_ROUTE_GC_INTERVAL,
2529 .procname = "gc_interval",
2530 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
2531 .maxlen = sizeof(int),
2532 .mode = 0644,
2533 .proc_handler = &proc_dointvec_jiffies,
2534 .strategy = &sysctl_jiffies,
2535 },
2536 {
2537 .ctl_name = NET_IPV6_ROUTE_GC_ELASTICITY,
2538 .procname = "gc_elasticity",
2539 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
2540 .maxlen = sizeof(int),
2541 .mode = 0644,
2542 .proc_handler = &proc_dointvec_jiffies,
2543 .strategy = &sysctl_jiffies,
2544 },
2545 {
2546 .ctl_name = NET_IPV6_ROUTE_MTU_EXPIRES,
2547 .procname = "mtu_expires",
2548 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
2549 .maxlen = sizeof(int),
2550 .mode = 0644,
2551 .proc_handler = &proc_dointvec_jiffies,
2552 .strategy = &sysctl_jiffies,
2553 },
2554 {
2555 .ctl_name = NET_IPV6_ROUTE_MIN_ADVMSS,
2556 .procname = "min_adv_mss",
2557 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
2558 .maxlen = sizeof(int),
2559 .mode = 0644,
2560 .proc_handler = &proc_dointvec_jiffies,
2561 .strategy = &sysctl_jiffies,
2562 },
2563 {
2564 .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS,
2565 .procname = "gc_min_interval_ms",
2566 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2567 .maxlen = sizeof(int),
2568 .mode = 0644,
2569 .proc_handler = &proc_dointvec_ms_jiffies,
2570 .strategy = &sysctl_ms_jiffies,
2571 },
2572 { .ctl_name = 0 }
2573 };
2574
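/*
 * Duplicate the sysctl template for a namespace and point the .data
 * fields at that namespace's variables.
 */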
2575 struct ctl_table *ipv6_route_sysctl_init(struct net *net)
2576 {
2577 struct ctl_table *table;
2578
2579 table = kmemdup(ipv6_route_table_template,
2580 sizeof(ipv6_route_table_template),
2581 GFP_KERNEL);
2582
2583 if (table) {
2584 table[0].data = &net->ipv6.sysctl.flush_delay;
2585 table[1].data = &net->ipv6.ip6_dst_ops->gc_thresh;
2586 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2587 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2588 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2589 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2590 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2591 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2592 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2593 }
2594
2595 return table;
2596 }
2597 #endif
2598
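/*
 * Per-namespace setup: clone dst_ops and the special route templates,
 * then create the /proc/net entries.
 */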
2599 static int ip6_route_net_init(struct net *net)
2600 {
2601 int ret = 0;
2602
2603 ret = -ENOMEM;
2604 net->ipv6.ip6_dst_ops = kmemdup(&ip6_dst_ops_template,
2605 sizeof(*net->ipv6.ip6_dst_ops),
2606 GFP_KERNEL);
2607 if (!net->ipv6.ip6_dst_ops)
2608 goto out;
2609 net->ipv6.ip6_dst_ops->dst_net = net;
2610
2611 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
2612 sizeof(*net->ipv6.ip6_null_entry),
2613 GFP_KERNEL);
2614 if (!net->ipv6.ip6_null_entry)
2615 goto out_ip6_dst_ops;
2616 net->ipv6.ip6_null_entry->u.dst.path =
2617 (struct dst_entry *)net->ipv6.ip6_null_entry;
2618 net->ipv6.ip6_null_entry->u.dst.ops = net->ipv6.ip6_dst_ops;
2619
2620 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2621 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
2622 sizeof(*net->ipv6.ip6_prohibit_entry),
2623 GFP_KERNEL);
2624 if (!net->ipv6.ip6_prohibit_entry) {
2625 kfree(net->ipv6.ip6_null_entry);
2626 goto out_ip6_dst_ops;
2627 }
2628 net->ipv6.ip6_prohibit_entry->u.dst.path =
2629 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
2630 net->ipv6.ip6_prohibit_entry->u.dst.ops = net->ipv6.ip6_dst_ops;
2631
2632 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
2633 sizeof(*net->ipv6.ip6_blk_hole_entry),
2634 GFP_KERNEL);
2635 if (!net->ipv6.ip6_blk_hole_entry) {
2636 kfree(net->ipv6.ip6_null_entry);
2637 kfree(net->ipv6.ip6_prohibit_entry);
2638 goto out_ip6_dst_ops;
2639 }
2640 net->ipv6.ip6_blk_hole_entry->u.dst.path =
2641 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
2642 net->ipv6.ip6_blk_hole_entry->u.dst.ops = net->ipv6.ip6_dst_ops;
2643 #endif
2644
2645 #ifdef CONFIG_PROC_FS
2646 proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
2647 proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2648 #endif
2649 net->ipv6.ip6_rt_gc_expire = 30*HZ;
2650
2651 ret = 0;
2652 out:
2653 return ret;
2654
2655 out_ip6_dst_ops:
2656 kfree(net->ipv6.ip6_dst_ops);
2657 goto out;
2658 }
2659
2660 static void ip6_route_net_exit(struct net *net)
2661 {
2662 #ifdef CONFIG_PROC_FS
2663 proc_net_remove(net, "ipv6_route");
2664 proc_net_remove(net, "rt6_stats");
2665 #endif
2666 kfree(net->ipv6.ip6_null_entry);
2667 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2668 kfree(net->ipv6.ip6_prohibit_entry);
2669 kfree(net->ipv6.ip6_blk_hole_entry);
2670 #endif
2671 kfree(net->ipv6.ip6_dst_ops);
2672 }
2673
2674 static struct pernet_operations ip6_route_net_ops = {
2675 .init = ip6_route_net_init,
2676 .exit = ip6_route_net_exit,
2677 };
2678
2679 static struct notifier_block ip6_route_dev_notifier = {
2680 .notifier_call = ip6_route_dev_notify,
2681 .priority = 0,
2682 };
2683
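/*
 * Subsystem init: create the rt6_info slab cache, register the
 * per-namespace operations, fib6, xfrm6 and fib rules, the rtnetlink
 * message handlers and the netdevice notifier.  Errors unwind in
 * reverse order.
 */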
2684 int __init ip6_route_init(void)
2685 {
2686 int ret;
2687
2688 ret = -ENOMEM;
2689 ip6_dst_ops_template.kmem_cachep =
2690 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
2691 SLAB_HWCACHE_ALIGN, NULL);
2692 if (!ip6_dst_ops_template.kmem_cachep)
2693 goto out;
2694
2695 ret = register_pernet_subsys(&ip6_route_net_ops);
2696 if (ret)
2697 goto out_kmem_cache;
2698
2699 /* The loopback device is registered before this code runs, so the
2700 * rt6_info templates above hold no reference to it; take one
2701 * manually for init_net. */
2702 init_net.ipv6.ip6_null_entry->u.dst.dev = init_net.loopback_dev;
2703 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2704 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2705 init_net.ipv6.ip6_prohibit_entry->u.dst.dev = init_net.loopback_dev;
2706 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2707 init_net.ipv6.ip6_blk_hole_entry->u.dst.dev = init_net.loopback_dev;
2708 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2709 #endif
2710 ret = fib6_init();
2711 if (ret)
2712 goto out_register_subsys;
2713
2714 ret = xfrm6_init();
2715 if (ret)
2716 goto out_fib6_init;
2717
2718 ret = fib6_rules_init();
2719 if (ret)
2720 goto xfrm6_init;
2721
2722 ret = -ENOBUFS;
2723 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL) ||
2724 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL) ||
2725 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL))
2726 goto fib6_rules_init;
2727
2728 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
2729 if (ret)
2730 goto fib6_rules_init;
2731
2732 out:
2733 return ret;
2734
2735 fib6_rules_init:
2736 fib6_rules_cleanup();
2737 xfrm6_init:
2738 xfrm6_fini();
2739 out_fib6_init:
2740 fib6_gc_cleanup();
2741 out_register_subsys:
2742 unregister_pernet_subsys(&ip6_route_net_ops);
2743 out_kmem_cache:
2744 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2745 goto out;
2746 }
2747
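/* Undo ip6_route_init(), in reverse order. */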
2748 void ip6_route_cleanup(void)
2749 {
2750 unregister_netdevice_notifier(&ip6_route_dev_notifier);
2751 fib6_rules_cleanup();
2752 xfrm6_fini();
2753 fib6_gc_cleanup();
2754 unregister_pernet_subsys(&ip6_route_net_ops);
2755 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2756 }