[IPV6]: Add RFC4214 support
net/ipv6/route.c
1 /*
2 * Linux INET6 implementation
3 * FIB front-end.
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * $Id: route.c,v 1.56 2001/10/31 21:55:55 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16 /* Changes:
17 *
18 * YOSHIFUJI Hideaki @USAGI
19 * reworked default router selection.
20 * - respect outgoing interface
21 * - select from (probably) reachable routers (i.e.
22 * routers in REACHABLE, STALE, DELAY or PROBE states).
23 * - always select the same router if it is (probably)
24 * reachable. otherwise, round-robin the list.
25 * Ville Nuorvala
26 * Fixed routing subtrees.
27 */
28
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/types.h>
32 #include <linux/times.h>
33 #include <linux/socket.h>
34 #include <linux/sockios.h>
35 #include <linux/net.h>
36 #include <linux/route.h>
37 #include <linux/netdevice.h>
38 #include <linux/in6.h>
39 #include <linux/init.h>
40 #include <linux/if_arp.h>
41 #include <linux/proc_fs.h>
42 #include <linux/seq_file.h>
43 #include <net/net_namespace.h>
44 #include <net/snmp.h>
45 #include <net/ipv6.h>
46 #include <net/ip6_fib.h>
47 #include <net/ip6_route.h>
48 #include <net/ndisc.h>
49 #include <net/addrconf.h>
50 #include <net/tcp.h>
51 #include <linux/rtnetlink.h>
52 #include <net/dst.h>
53 #include <net/xfrm.h>
54 #include <net/netevent.h>
55 #include <net/netlink.h>
56
57 #include <asm/uaccess.h>
58
59 #ifdef CONFIG_SYSCTL
60 #include <linux/sysctl.h>
61 #endif
62
63 /* Set to 3 to get tracing. */
64 #define RT6_DEBUG 2
65
66 #if RT6_DEBUG >= 3
67 #define RDBG(x) printk x
68 #define RT6_TRACE(x...) printk(KERN_DEBUG x)
69 #else
70 #define RDBG(x)
71 #define RT6_TRACE(x...) do { ; } while (0)
72 #endif
73
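/* When set to 1, ip6_pol_route() also clones off-link routes (those that
 * already have a nexthop neighbour or carry RTF_NONEXTHOP) into per-destination
 * RTF_CACHE entries via rt6_alloc_clone(); with 0 they are used directly. */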
74 #define CLONE_OFFLINK_ROUTE 0
75
76 static int ip6_rt_max_size = 4096;
77 static int ip6_rt_gc_min_interval = HZ / 2;
78 static int ip6_rt_gc_timeout = 60*HZ;
79 int ip6_rt_gc_interval = 30*HZ;
80 static int ip6_rt_gc_elasticity = 9;
81 static int ip6_rt_mtu_expires = 10*60*HZ;
82 static int ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
83
84 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
85 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
86 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
87 static void ip6_dst_destroy(struct dst_entry *);
88 static void ip6_dst_ifdown(struct dst_entry *,
89 struct net_device *dev, int how);
90 static int ip6_dst_gc(void);
91
92 static int ip6_pkt_discard(struct sk_buff *skb);
93 static int ip6_pkt_discard_out(struct sk_buff *skb);
94 static void ip6_link_failure(struct sk_buff *skb);
95 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
96
97 #ifdef CONFIG_IPV6_ROUTE_INFO
98 static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixlen,
99 struct in6_addr *gwaddr, int ifindex,
100 unsigned pref);
101 static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixlen,
102 struct in6_addr *gwaddr, int ifindex);
103 #endif
104
105 static struct dst_ops ip6_dst_ops = {
106 .family = AF_INET6,
107 .protocol = __constant_htons(ETH_P_IPV6),
108 .gc = ip6_dst_gc,
109 .gc_thresh = 1024,
110 .check = ip6_dst_check,
111 .destroy = ip6_dst_destroy,
112 .ifdown = ip6_dst_ifdown,
113 .negative_advice = ip6_negative_advice,
114 .link_failure = ip6_link_failure,
115 .update_pmtu = ip6_rt_update_pmtu,
116 .local_out = ip6_local_out,
117 .entry_size = sizeof(struct rt6_info),
118 };
119
120 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
121 {
122 }
123
124 static struct dst_ops ip6_dst_blackhole_ops = {
125 .family = AF_INET6,
126 .protocol = __constant_htons(ETH_P_IPV6),
127 .destroy = ip6_dst_destroy,
128 .check = ip6_dst_check,
129 .update_pmtu = ip6_rt_blackhole_update_pmtu,
130 .entry_size = sizeof(struct rt6_info),
131 };
132
133 struct rt6_info ip6_null_entry = {
134 .u = {
135 .dst = {
136 .__refcnt = ATOMIC_INIT(1),
137 .__use = 1,
138 .obsolete = -1,
139 .error = -ENETUNREACH,
140 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
141 .input = ip6_pkt_discard,
142 .output = ip6_pkt_discard_out,
143 .ops = &ip6_dst_ops,
144 .path = (struct dst_entry*)&ip6_null_entry,
145 }
146 },
147 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
148 .rt6i_metric = ~(u32) 0,
149 .rt6i_ref = ATOMIC_INIT(1),
150 };
151
152 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
153
154 static int ip6_pkt_prohibit(struct sk_buff *skb);
155 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
156
157 struct rt6_info ip6_prohibit_entry = {
158 .u = {
159 .dst = {
160 .__refcnt = ATOMIC_INIT(1),
161 .__use = 1,
162 .obsolete = -1,
163 .error = -EACCES,
164 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
165 .input = ip6_pkt_prohibit,
166 .output = ip6_pkt_prohibit_out,
167 .ops = &ip6_dst_ops,
168 .path = (struct dst_entry*)&ip6_prohibit_entry,
169 }
170 },
171 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
172 .rt6i_metric = ~(u32) 0,
173 .rt6i_ref = ATOMIC_INIT(1),
174 };
175
176 struct rt6_info ip6_blk_hole_entry = {
177 .u = {
178 .dst = {
179 .__refcnt = ATOMIC_INIT(1),
180 .__use = 1,
181 .obsolete = -1,
182 .error = -EINVAL,
183 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
184 .input = dst_discard,
185 .output = dst_discard,
186 .ops = &ip6_dst_ops,
187 .path = (struct dst_entry*)&ip6_blk_hole_entry,
188 }
189 },
190 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
191 .rt6i_metric = ~(u32) 0,
192 .rt6i_ref = ATOMIC_INIT(1),
193 };
194
195 #endif
196
197 /* allocate dst with ip6_dst_ops */
198 static __inline__ struct rt6_info *ip6_dst_alloc(void)
199 {
200 return (struct rt6_info *)dst_alloc(&ip6_dst_ops);
201 }
202
203 static void ip6_dst_destroy(struct dst_entry *dst)
204 {
205 struct rt6_info *rt = (struct rt6_info *)dst;
206 struct inet6_dev *idev = rt->rt6i_idev;
207
208 if (idev != NULL) {
209 rt->rt6i_idev = NULL;
210 in6_dev_put(idev);
211 }
212 }
213
214 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
215 int how)
216 {
217 struct rt6_info *rt = (struct rt6_info *)dst;
218 struct inet6_dev *idev = rt->rt6i_idev;
219
220 if (dev != init_net.loopback_dev && idev != NULL && idev->dev == dev) {
221 struct inet6_dev *loopback_idev = in6_dev_get(init_net.loopback_dev);
222 if (loopback_idev != NULL) {
223 rt->rt6i_idev = loopback_idev;
224 in6_dev_put(idev);
225 }
226 }
227 }
228
229 static __inline__ int rt6_check_expired(const struct rt6_info *rt)
230 {
231 return (rt->rt6i_flags & RTF_EXPIRES &&
232 time_after(jiffies, rt->rt6i_expires));
233 }
234
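/*
 * Multicast and link-local destinations are only meaningful on the
 * interface they were received on or bound to, so lookups for them
 * must use a strict device match.
 */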
235 static inline int rt6_need_strict(struct in6_addr *daddr)
236 {
237 return (ipv6_addr_type(daddr) &
238 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
239 }
240
241 /*
242 * Route lookup. The caller is assumed to hold the relevant table->tb6_lock.
243 */
244
245 static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt,
246 int oif,
247 int strict)
248 {
249 struct rt6_info *local = NULL;
250 struct rt6_info *sprt;
251
252 if (oif) {
253 for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) {
254 struct net_device *dev = sprt->rt6i_dev;
255 if (dev->ifindex == oif)
256 return sprt;
257 if (dev->flags & IFF_LOOPBACK) {
258 if (sprt->rt6i_idev == NULL ||
259 sprt->rt6i_idev->dev->ifindex != oif) {
260 if (strict && oif)
261 continue;
262 if (local && (!oif ||
263 local->rt6i_idev->dev->ifindex == oif))
264 continue;
265 }
266 local = sprt;
267 }
268 }
269
270 if (local)
271 return local;
272
273 if (strict)
274 return &ip6_null_entry;
275 }
276 return rt;
277 }
278
279 #ifdef CONFIG_IPV6_ROUTER_PREF
280 static void rt6_probe(struct rt6_info *rt)
281 {
282 struct neighbour *neigh = rt ? rt->rt6i_nexthop : NULL;
283 /*
284 * Okay, this does not seem to be appropriate
285 * for now; however, we need to check whether it
286 * really is, aka Router Reachability Probing.
287 *
288 * Router Reachability Probe MUST be rate-limited
289 * to no more than one per minute.
290 */
291 if (!neigh || (neigh->nud_state & NUD_VALID))
292 return;
293 read_lock_bh(&neigh->lock);
294 if (!(neigh->nud_state & NUD_VALID) &&
295 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
296 struct in6_addr mcaddr;
297 struct in6_addr *target;
298
299 neigh->updated = jiffies;
300 read_unlock_bh(&neigh->lock);
301
302 target = (struct in6_addr *)&neigh->primary_key;
303 addrconf_addr_solict_mult(target, &mcaddr);
304 ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
305 } else
306 read_unlock_bh(&neigh->lock);
307 }
308 #else
309 static inline void rt6_probe(struct rt6_info *rt)
310 {
311 return;
312 }
313 #endif
314
315 /*
316 * Default Router Selection (RFC 2461 6.3.6)
317 */
318 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
319 {
320 struct net_device *dev = rt->rt6i_dev;
321 if (!oif || dev->ifindex == oif)
322 return 2;
323 if ((dev->flags & IFF_LOOPBACK) &&
324 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
325 return 1;
326 return 0;
327 }
328
329 static inline int rt6_check_neigh(struct rt6_info *rt)
330 {
331 struct neighbour *neigh = rt->rt6i_nexthop;
332 int m;
333 if (rt->rt6i_flags & RTF_NONEXTHOP ||
334 !(rt->rt6i_flags & RTF_GATEWAY))
335 m = 1;
336 else if (neigh) {
337 read_lock_bh(&neigh->lock);
338 if (neigh->nud_state & NUD_VALID)
339 m = 2;
340 #ifdef CONFIG_IPV6_ROUTER_PREF
341 else if (neigh->nud_state & NUD_FAILED)
342 m = 0;
343 #endif
344 else
345 m = 1;
346 read_unlock_bh(&neigh->lock);
347 } else
348 m = 0;
349 return m;
350 }
351
352 static int rt6_score_route(struct rt6_info *rt, int oif,
353 int strict)
354 {
355 int m, n;
356
357 m = rt6_check_dev(rt, oif);
358 if (!m && (strict & RT6_LOOKUP_F_IFACE))
359 return -1;
360 #ifdef CONFIG_IPV6_ROUTER_PREF
361 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
362 #endif
363 n = rt6_check_neigh(rt);
364 if (!n && (strict & RT6_LOOKUP_F_REACHABLE))
365 return -1;
366 return m;
367 }
368
369 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
370 int *mpri, struct rt6_info *match)
371 {
372 int m;
373
374 if (rt6_check_expired(rt))
375 goto out;
376
377 m = rt6_score_route(rt, oif, strict);
378 if (m < 0)
379 goto out;
380
381 if (m > *mpri) {
382 if (strict & RT6_LOOKUP_F_REACHABLE)
383 rt6_probe(match);
384 *mpri = m;
385 match = rt;
386 } else if (strict & RT6_LOOKUP_F_REACHABLE) {
387 rt6_probe(rt);
388 }
389
390 out:
391 return match;
392 }
393
394 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
395 struct rt6_info *rr_head,
396 u32 metric, int oif, int strict)
397 {
398 struct rt6_info *rt, *match;
399 int mpri = -1;
400
401 match = NULL;
402 for (rt = rr_head; rt && rt->rt6i_metric == metric;
403 rt = rt->u.dst.rt6_next)
404 match = find_match(rt, oif, strict, &mpri, match);
405 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
406 rt = rt->u.dst.rt6_next)
407 match = find_match(rt, oif, strict, &mpri, match);
408
409 return match;
410 }
411
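/*
 * Pick the best-scoring route among the siblings of fn->leaf that share
 * the lowest metric; when none is (probably) reachable, advance
 * fn->rr_ptr so the next lookup round-robins to a different router.
 */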
412 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
413 {
414 struct rt6_info *match, *rt0;
415
416 RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
417 __FUNCTION__, fn->leaf, oif);
418
419 rt0 = fn->rr_ptr;
420 if (!rt0)
421 fn->rr_ptr = rt0 = fn->leaf;
422
423 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
424
425 if (!match &&
426 (strict & RT6_LOOKUP_F_REACHABLE)) {
427 struct rt6_info *next = rt0->u.dst.rt6_next;
428
429 /* no entries matched; do round-robin */
430 if (!next || next->rt6i_metric != rt0->rt6i_metric)
431 next = fn->leaf;
432
433 if (next != rt0)
434 fn->rr_ptr = next;
435 }
436
437 RT6_TRACE("%s() => %p\n",
438 __FUNCTION__, match);
439
440 return (match ? match : &ip6_null_entry);
441 }
442
443 #ifdef CONFIG_IPV6_ROUTE_INFO
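/*
 * Handle a Route Information option received in a Router Advertisement
 * (RFC 4191): validate the prefix length against the option length,
 * then add, refresh or delete the matching RTF_ROUTEINFO route.
 */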
444 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
445 struct in6_addr *gwaddr)
446 {
447 struct route_info *rinfo = (struct route_info *) opt;
448 struct in6_addr prefix_buf, *prefix;
449 unsigned int pref;
450 u32 lifetime;
451 struct rt6_info *rt;
452
453 if (len < sizeof(struct route_info)) {
454 return -EINVAL;
455 }
456
457 /* Sanity check for prefix_len and length */
458 if (rinfo->length > 3) {
459 return -EINVAL;
460 } else if (rinfo->prefix_len > 128) {
461 return -EINVAL;
462 } else if (rinfo->prefix_len > 64) {
463 if (rinfo->length < 2) {
464 return -EINVAL;
465 }
466 } else if (rinfo->prefix_len > 0) {
467 if (rinfo->length < 1) {
468 return -EINVAL;
469 }
470 }
471
472 pref = rinfo->route_pref;
473 if (pref == ICMPV6_ROUTER_PREF_INVALID)
474 pref = ICMPV6_ROUTER_PREF_MEDIUM;
475
476 lifetime = ntohl(rinfo->lifetime);
477 if (lifetime == 0xffffffff) {
478 /* infinity */
479 } else if (lifetime > 0x7fffffff/HZ) {
480 /* Avoid arithmetic overflow */
481 lifetime = 0x7fffffff/HZ - 1;
482 }
483
484 if (rinfo->length == 3)
485 prefix = (struct in6_addr *)rinfo->prefix;
486 else {
487 /* this function is safe */
488 ipv6_addr_prefix(&prefix_buf,
489 (struct in6_addr *)rinfo->prefix,
490 rinfo->prefix_len);
491 prefix = &prefix_buf;
492 }
493
494 rt = rt6_get_route_info(prefix, rinfo->prefix_len, gwaddr, dev->ifindex);
495
496 if (rt && !lifetime) {
497 ip6_del_rt(rt);
498 rt = NULL;
499 }
500
501 if (!rt && lifetime)
502 rt = rt6_add_route_info(prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
503 pref);
504 else if (rt)
505 rt->rt6i_flags = RTF_ROUTEINFO |
506 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
507
508 if (rt) {
509 if (lifetime == 0xffffffff) {
510 rt->rt6i_flags &= ~RTF_EXPIRES;
511 } else {
512 rt->rt6i_expires = jiffies + HZ * lifetime;
513 rt->rt6i_flags |= RTF_EXPIRES;
514 }
515 dst_release(&rt->u.dst);
516 }
517 return 0;
518 }
519 #endif
520
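/*
 * When a lookup ends up at ip6_null_entry, walk back up the fib6 tree
 * (descending into source-routing subtrees where present) and restart
 * the search at the first ancestor node that carries route info.
 */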
521 #define BACKTRACK(saddr) \
522 do { \
523 if (rt == &ip6_null_entry) { \
524 struct fib6_node *pn; \
525 while (1) { \
526 if (fn->fn_flags & RTN_TL_ROOT) \
527 goto out; \
528 pn = fn->parent; \
529 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
530 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
531 else \
532 fn = pn; \
533 if (fn->fn_flags & RTN_RTINFO) \
534 goto restart; \
535 } \
536 } \
537 } while(0)
538
539 static struct rt6_info *ip6_pol_route_lookup(struct fib6_table *table,
540 struct flowi *fl, int flags)
541 {
542 struct fib6_node *fn;
543 struct rt6_info *rt;
544
545 read_lock_bh(&table->tb6_lock);
546 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
547 restart:
548 rt = fn->leaf;
549 rt = rt6_device_match(rt, fl->oif, flags);
550 BACKTRACK(&fl->fl6_src);
551 out:
552 dst_use(&rt->u.dst, jiffies);
553 read_unlock_bh(&table->tb6_lock);
554 return rt;
555
556 }
557
558 struct rt6_info *rt6_lookup(struct in6_addr *daddr, struct in6_addr *saddr,
559 int oif, int strict)
560 {
561 struct flowi fl = {
562 .oif = oif,
563 .nl_u = {
564 .ip6_u = {
565 .daddr = *daddr,
566 },
567 },
568 };
569 struct dst_entry *dst;
570 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
571
572 if (saddr) {
573 memcpy(&fl.fl6_src, saddr, sizeof(*saddr));
574 flags |= RT6_LOOKUP_F_HAS_SADDR;
575 }
576
577 dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_lookup);
578 if (dst->error == 0)
579 return (struct rt6_info *) dst;
580
581 dst_release(dst);
582
583 return NULL;
584 }
585
586 EXPORT_SYMBOL(rt6_lookup);
587
588 /* ip6_ins_rt is called WITHOUT table->tb6_lock held.
589 It takes a new route entry; if the addition fails for any reason,
590 the route is freed. In any case, if the caller does not hold a
591 reference, the route may be destroyed.
592 */
593
594 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
595 {
596 int err;
597 struct fib6_table *table;
598
599 table = rt->rt6i_table;
600 write_lock_bh(&table->tb6_lock);
601 err = fib6_add(&table->tb6_root, rt, info);
602 write_unlock_bh(&table->tb6_lock);
603
604 return err;
605 }
606
607 int ip6_ins_rt(struct rt6_info *rt)
608 {
609 return __ip6_ins_rt(rt, NULL);
610 }
611
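/*
 * rt6_alloc_cow() copies a route into a /128 RTF_CACHE host route for
 * daddr and resolves a neighbour entry for it; rt6_alloc_clone() below
 * does the same but reuses the original route's neighbour.
 */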
612 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *daddr,
613 struct in6_addr *saddr)
614 {
615 struct rt6_info *rt;
616
617 /*
618 * Clone the route.
619 */
620
621 rt = ip6_rt_copy(ort);
622
623 if (rt) {
624 if (!(rt->rt6i_flags&RTF_GATEWAY)) {
625 if (rt->rt6i_dst.plen != 128 &&
626 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr))
627 rt->rt6i_flags |= RTF_ANYCAST;
628 ipv6_addr_copy(&rt->rt6i_gateway, daddr);
629 }
630
631 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
632 rt->rt6i_dst.plen = 128;
633 rt->rt6i_flags |= RTF_CACHE;
634 rt->u.dst.flags |= DST_HOST;
635
636 #ifdef CONFIG_IPV6_SUBTREES
637 if (rt->rt6i_src.plen && saddr) {
638 ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
639 rt->rt6i_src.plen = 128;
640 }
641 #endif
642
643 rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
644
645 }
646
647 return rt;
648 }
649
650 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *daddr)
651 {
652 struct rt6_info *rt = ip6_rt_copy(ort);
653 if (rt) {
654 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
655 rt->rt6i_dst.plen = 128;
656 rt->rt6i_flags |= RTF_CACHE;
657 rt->u.dst.flags |= DST_HOST;
658 rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
659 }
660 return rt;
661 }
662
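/*
 * Core per-table lookup used for both input and output: prefer
 * (probably) reachable routers first when not acting as a router,
 * fall back to any match, and insert a cloned RTF_CACHE host route
 * when the result needs one.
 */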
663 static struct rt6_info *ip6_pol_route(struct fib6_table *table, int oif,
664 struct flowi *fl, int flags)
665 {
666 struct fib6_node *fn;
667 struct rt6_info *rt, *nrt;
668 int strict = 0;
669 int attempts = 3;
670 int err;
671 int reachable = ipv6_devconf.forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
672
673 strict |= flags & RT6_LOOKUP_F_IFACE;
674
675 relookup:
676 read_lock_bh(&table->tb6_lock);
677
678 restart_2:
679 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
680
681 restart:
682 rt = rt6_select(fn, oif, strict | reachable);
683 BACKTRACK(&fl->fl6_src);
684 if (rt == &ip6_null_entry ||
685 rt->rt6i_flags & RTF_CACHE)
686 goto out;
687
688 dst_hold(&rt->u.dst);
689 read_unlock_bh(&table->tb6_lock);
690
691 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
692 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
693 else {
694 #if CLONE_OFFLINK_ROUTE
695 nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
696 #else
697 goto out2;
698 #endif
699 }
700
701 dst_release(&rt->u.dst);
702 rt = nrt ? : &ip6_null_entry;
703
704 dst_hold(&rt->u.dst);
705 if (nrt) {
706 err = ip6_ins_rt(nrt);
707 if (!err)
708 goto out2;
709 }
710
711 if (--attempts <= 0)
712 goto out2;
713
714 /*
715 * Race condition! In the gap while table->tb6_lock was
716 * released, someone could have inserted this route. Relookup.
717 */
718 dst_release(&rt->u.dst);
719 goto relookup;
720
721 out:
722 if (reachable) {
723 reachable = 0;
724 goto restart_2;
725 }
726 dst_hold(&rt->u.dst);
727 read_unlock_bh(&table->tb6_lock);
728 out2:
729 rt->u.dst.lastuse = jiffies;
730 rt->u.dst.__use++;
731
732 return rt;
733 }
734
735 static struct rt6_info *ip6_pol_route_input(struct fib6_table *table,
736 struct flowi *fl, int flags)
737 {
738 return ip6_pol_route(table, fl->iif, fl, flags);
739 }
740
741 void ip6_route_input(struct sk_buff *skb)
742 {
743 struct ipv6hdr *iph = ipv6_hdr(skb);
744 int flags = RT6_LOOKUP_F_HAS_SADDR;
745 struct flowi fl = {
746 .iif = skb->dev->ifindex,
747 .nl_u = {
748 .ip6_u = {
749 .daddr = iph->daddr,
750 .saddr = iph->saddr,
751 .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
752 },
753 },
754 .mark = skb->mark,
755 .proto = iph->nexthdr,
756 };
757
758 if (rt6_need_strict(&iph->daddr))
759 flags |= RT6_LOOKUP_F_IFACE;
760
761 skb->dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_input);
762 }
763
764 static struct rt6_info *ip6_pol_route_output(struct fib6_table *table,
765 struct flowi *fl, int flags)
766 {
767 return ip6_pol_route(table, fl->oif, fl, flags);
768 }
769
770 struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl)
771 {
772 int flags = 0;
773
774 if (rt6_need_strict(&fl->fl6_dst))
775 flags |= RT6_LOOKUP_F_IFACE;
776
777 if (!ipv6_addr_any(&fl->fl6_src))
778 flags |= RT6_LOOKUP_F_HAS_SADDR;
779
780 return fib6_rule_lookup(fl, flags, ip6_pol_route_output);
781 }
782
783 EXPORT_SYMBOL(ip6_route_output);
784
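/*
 * Replace *dstp with a standalone copy whose input/output handlers
 * silently discard packets, while preserving the original route's
 * device, metrics and addresses.
 */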
785 int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl)
786 {
787 struct rt6_info *ort = (struct rt6_info *) *dstp;
788 struct rt6_info *rt = (struct rt6_info *)
789 dst_alloc(&ip6_dst_blackhole_ops);
790 struct dst_entry *new = NULL;
791
792 if (rt) {
793 new = &rt->u.dst;
794
795 atomic_set(&new->__refcnt, 1);
796 new->__use = 1;
797 new->input = dst_discard;
798 new->output = dst_discard;
799
800 memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
801 new->dev = ort->u.dst.dev;
802 if (new->dev)
803 dev_hold(new->dev);
804 rt->rt6i_idev = ort->rt6i_idev;
805 if (rt->rt6i_idev)
806 in6_dev_hold(rt->rt6i_idev);
807 rt->rt6i_expires = 0;
808
809 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
810 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
811 rt->rt6i_metric = 0;
812
813 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
814 #ifdef CONFIG_IPV6_SUBTREES
815 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
816 #endif
817
818 dst_free(new);
819 }
820
821 dst_release(*dstp);
822 *dstp = new;
823 return (new ? 0 : -ENOMEM);
824 }
825 EXPORT_SYMBOL_GPL(ip6_dst_blackhole);
826
827 /*
828 * Destination cache support functions
829 */
830
831 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
832 {
833 struct rt6_info *rt;
834
835 rt = (struct rt6_info *) dst;
836
837 if (rt && rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
838 return dst;
839
840 return NULL;
841 }
842
843 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
844 {
845 struct rt6_info *rt = (struct rt6_info *) dst;
846
847 if (rt) {
848 if (rt->rt6i_flags & RTF_CACHE)
849 ip6_del_rt(rt);
850 else
851 dst_release(dst);
852 }
853 return NULL;
854 }
855
856 static void ip6_link_failure(struct sk_buff *skb)
857 {
858 struct rt6_info *rt;
859
860 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
861
862 rt = (struct rt6_info *) skb->dst;
863 if (rt) {
864 if (rt->rt6i_flags&RTF_CACHE) {
865 dst_set_expires(&rt->u.dst, 0);
866 rt->rt6i_flags |= RTF_EXPIRES;
867 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
868 rt->rt6i_node->fn_sernum = -1;
869 }
870 }
871
872 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
873 {
874 struct rt6_info *rt6 = (struct rt6_info*)dst;
875
876 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
877 rt6->rt6i_flags |= RTF_MODIFIED;
878 if (mtu < IPV6_MIN_MTU) {
879 mtu = IPV6_MIN_MTU;
880 dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
881 }
882 dst->metrics[RTAX_MTU-1] = mtu;
883 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
884 }
885 }
886
887 static int ipv6_get_mtu(struct net_device *dev);
888
889 static inline unsigned int ipv6_advmss(unsigned int mtu)
890 {
891 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
892
893 if (mtu < ip6_rt_min_advmss)
894 mtu = ip6_rt_min_advmss;
895
896 /*
897 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
898 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
899 * IPV6_MAXPLEN is also valid and means: "any MSS,
900 * rely only on pmtu discovery"
901 */
902 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
903 mtu = IPV6_MAXPLEN;
904 return mtu;
905 }
906
907 static struct dst_entry *ndisc_dst_gc_list;
908 static DEFINE_SPINLOCK(ndisc_lock);
909
910 struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
911 struct neighbour *neigh,
912 struct in6_addr *addr,
913 int (*output)(struct sk_buff *))
914 {
915 struct rt6_info *rt;
916 struct inet6_dev *idev = in6_dev_get(dev);
917
918 if (unlikely(idev == NULL))
919 return NULL;
920
921 rt = ip6_dst_alloc();
922 if (unlikely(rt == NULL)) {
923 in6_dev_put(idev);
924 goto out;
925 }
926
927 dev_hold(dev);
928 if (neigh)
929 neigh_hold(neigh);
930 else
931 neigh = ndisc_get_neigh(dev, addr);
932
933 rt->rt6i_dev = dev;
934 rt->rt6i_idev = idev;
935 rt->rt6i_nexthop = neigh;
936 atomic_set(&rt->u.dst.__refcnt, 1);
937 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255;
938 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
939 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
940 rt->u.dst.output = output;
941
942 #if 0 /* there's no chance to use these for ndisc */
943 rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
944 ? DST_HOST
945 : 0;
946 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
947 rt->rt6i_dst.plen = 128;
948 #endif
949
950 spin_lock_bh(&ndisc_lock);
951 rt->u.dst.next = ndisc_dst_gc_list;
952 ndisc_dst_gc_list = &rt->u.dst;
953 spin_unlock_bh(&ndisc_lock);
954
955 fib6_force_start_gc();
956
957 out:
958 return &rt->u.dst;
959 }
960
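/*
 * Walk ndisc_dst_gc_list, freeing entries that are no longer
 * referenced; *more is incremented for each entry still in use.
 */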
961 int ndisc_dst_gc(int *more)
962 {
963 struct dst_entry *dst, *next, **pprev;
964 int freed;
965
966 next = NULL;
967 freed = 0;
968
969 spin_lock_bh(&ndisc_lock);
970 pprev = &ndisc_dst_gc_list;
971
972 while ((dst = *pprev) != NULL) {
973 if (!atomic_read(&dst->__refcnt)) {
974 *pprev = dst->next;
975 dst_free(dst);
976 freed++;
977 } else {
978 pprev = &dst->next;
979 (*more)++;
980 }
981 }
982
983 spin_unlock_bh(&ndisc_lock);
984
985 return freed;
986 }
987
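/*
 * dst_ops garbage collector: rate-limited by ip6_rt_gc_min_interval,
 * it runs fib6_run_gc() with an expiry age that grows on each pass and
 * decays by ip6_rt_gc_elasticity; returns nonzero while the cache is
 * still above ip6_rt_max_size.
 */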
988 static int ip6_dst_gc(void)
989 {
990 static unsigned expire = 30*HZ;
991 static unsigned long last_gc;
992 unsigned long now = jiffies;
993
994 if (time_after(last_gc + ip6_rt_gc_min_interval, now) &&
995 atomic_read(&ip6_dst_ops.entries) <= ip6_rt_max_size)
996 goto out;
997
998 expire++;
999 fib6_run_gc(expire);
1000 last_gc = now;
1001 if (atomic_read(&ip6_dst_ops.entries) < ip6_dst_ops.gc_thresh)
1002 expire = ip6_rt_gc_timeout>>1;
1003
1004 out:
1005 expire -= expire>>ip6_rt_gc_elasticity;
1006 return (atomic_read(&ip6_dst_ops.entries) > ip6_rt_max_size);
1007 }
1008
1009 /* Clean the host part of a prefix. Not necessary with a radix tree,
1010 but it results in cleaner routing tables.
1011
1012 Remove this only once everything is known to work!
1013 */
1014
1015 static int ipv6_get_mtu(struct net_device *dev)
1016 {
1017 int mtu = IPV6_MIN_MTU;
1018 struct inet6_dev *idev;
1019
1020 idev = in6_dev_get(dev);
1021 if (idev) {
1022 mtu = idev->cnf.mtu6;
1023 in6_dev_put(idev);
1024 }
1025 return mtu;
1026 }
1027
1028 int ipv6_get_hoplimit(struct net_device *dev)
1029 {
1030 int hoplimit = ipv6_devconf.hop_limit;
1031 struct inet6_dev *idev;
1032
1033 idev = in6_dev_get(dev);
1034 if (idev) {
1035 hoplimit = idev->cnf.hop_limit;
1036 in6_dev_put(idev);
1037 }
1038 return hoplimit;
1039 }
1040
1041 /*
1042 * Route addition and deletion
1043 */
1044
1045 int ip6_route_add(struct fib6_config *cfg)
1046 {
1047 int err;
1048 struct rt6_info *rt = NULL;
1049 struct net_device *dev = NULL;
1050 struct inet6_dev *idev = NULL;
1051 struct fib6_table *table;
1052 int addr_type;
1053
1054 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1055 return -EINVAL;
1056 #ifndef CONFIG_IPV6_SUBTREES
1057 if (cfg->fc_src_len)
1058 return -EINVAL;
1059 #endif
1060 if (cfg->fc_ifindex) {
1061 err = -ENODEV;
1062 dev = dev_get_by_index(&init_net, cfg->fc_ifindex);
1063 if (!dev)
1064 goto out;
1065 idev = in6_dev_get(dev);
1066 if (!idev)
1067 goto out;
1068 }
1069
1070 if (cfg->fc_metric == 0)
1071 cfg->fc_metric = IP6_RT_PRIO_USER;
1072
1073 table = fib6_new_table(cfg->fc_table);
1074 if (table == NULL) {
1075 err = -ENOBUFS;
1076 goto out;
1077 }
1078
1079 rt = ip6_dst_alloc();
1080
1081 if (rt == NULL) {
1082 err = -ENOMEM;
1083 goto out;
1084 }
1085
1086 rt->u.dst.obsolete = -1;
1087 rt->rt6i_expires = jiffies + clock_t_to_jiffies(cfg->fc_expires);
1088
1089 if (cfg->fc_protocol == RTPROT_UNSPEC)
1090 cfg->fc_protocol = RTPROT_BOOT;
1091 rt->rt6i_protocol = cfg->fc_protocol;
1092
1093 addr_type = ipv6_addr_type(&cfg->fc_dst);
1094
1095 if (addr_type & IPV6_ADDR_MULTICAST)
1096 rt->u.dst.input = ip6_mc_input;
1097 else
1098 rt->u.dst.input = ip6_forward;
1099
1100 rt->u.dst.output = ip6_output;
1101
1102 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1103 rt->rt6i_dst.plen = cfg->fc_dst_len;
1104 if (rt->rt6i_dst.plen == 128)
1105 rt->u.dst.flags = DST_HOST;
1106
1107 #ifdef CONFIG_IPV6_SUBTREES
1108 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1109 rt->rt6i_src.plen = cfg->fc_src_len;
1110 #endif
1111
1112 rt->rt6i_metric = cfg->fc_metric;
1113
1114 /* We cannot add true routes via loopback here;
1115 they would result in kernel looping. Promote them to reject routes instead.
1116 */
1117 if ((cfg->fc_flags & RTF_REJECT) ||
1118 (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) {
1119 /* hold loopback dev/idev if we haven't done so. */
1120 if (dev != init_net.loopback_dev) {
1121 if (dev) {
1122 dev_put(dev);
1123 in6_dev_put(idev);
1124 }
1125 dev = init_net.loopback_dev;
1126 dev_hold(dev);
1127 idev = in6_dev_get(dev);
1128 if (!idev) {
1129 err = -ENODEV;
1130 goto out;
1131 }
1132 }
1133 rt->u.dst.output = ip6_pkt_discard_out;
1134 rt->u.dst.input = ip6_pkt_discard;
1135 rt->u.dst.error = -ENETUNREACH;
1136 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1137 goto install_route;
1138 }
1139
1140 if (cfg->fc_flags & RTF_GATEWAY) {
1141 struct in6_addr *gw_addr;
1142 int gwa_type;
1143
1144 gw_addr = &cfg->fc_gateway;
1145 ipv6_addr_copy(&rt->rt6i_gateway, gw_addr);
1146 gwa_type = ipv6_addr_type(gw_addr);
1147
1148 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1149 struct rt6_info *grt;
1150
1151 /* IPv6 strictly prohibits using non-link-local
1152 addresses as the nexthop address.
1153 Otherwise, the router would not be able to send redirects.
1154 That is generally a good thing, but in some (rare!) circumstances
1155 (SIT, PtP, NBMA NOARP links) it is handy to allow
1156 some exceptions. --ANK
1157 */
1158 err = -EINVAL;
1159 if (!(gwa_type&IPV6_ADDR_UNICAST))
1160 goto out;
1161
1162 grt = rt6_lookup(gw_addr, NULL, cfg->fc_ifindex, 1);
1163
1164 err = -EHOSTUNREACH;
1165 if (grt == NULL)
1166 goto out;
1167 if (dev) {
1168 if (dev != grt->rt6i_dev) {
1169 dst_release(&grt->u.dst);
1170 goto out;
1171 }
1172 } else {
1173 dev = grt->rt6i_dev;
1174 idev = grt->rt6i_idev;
1175 dev_hold(dev);
1176 in6_dev_hold(grt->rt6i_idev);
1177 }
1178 if (!(grt->rt6i_flags&RTF_GATEWAY))
1179 err = 0;
1180 dst_release(&grt->u.dst);
1181
1182 if (err)
1183 goto out;
1184 }
1185 err = -EINVAL;
1186 if (dev == NULL || (dev->flags&IFF_LOOPBACK))
1187 goto out;
1188 }
1189
1190 err = -ENODEV;
1191 if (dev == NULL)
1192 goto out;
1193
1194 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
1195 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
1196 if (IS_ERR(rt->rt6i_nexthop)) {
1197 err = PTR_ERR(rt->rt6i_nexthop);
1198 rt->rt6i_nexthop = NULL;
1199 goto out;
1200 }
1201 }
1202
1203 rt->rt6i_flags = cfg->fc_flags;
1204
1205 install_route:
1206 if (cfg->fc_mx) {
1207 struct nlattr *nla;
1208 int remaining;
1209
1210 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1211 int type = nla_type(nla);
1212
1213 if (type) {
1214 if (type > RTAX_MAX) {
1215 err = -EINVAL;
1216 goto out;
1217 }
1218
1219 rt->u.dst.metrics[type - 1] = nla_get_u32(nla);
1220 }
1221 }
1222 }
1223
1224 if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
1225 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1226 if (!rt->u.dst.metrics[RTAX_MTU-1])
1227 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
1228 if (!rt->u.dst.metrics[RTAX_ADVMSS-1])
1229 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
1230 rt->u.dst.dev = dev;
1231 rt->rt6i_idev = idev;
1232 rt->rt6i_table = table;
1233 return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1234
1235 out:
1236 if (dev)
1237 dev_put(dev);
1238 if (idev)
1239 in6_dev_put(idev);
1240 if (rt)
1241 dst_free(&rt->u.dst);
1242 return err;
1243 }
1244
1245 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1246 {
1247 int err;
1248 struct fib6_table *table;
1249
1250 if (rt == &ip6_null_entry)
1251 return -ENOENT;
1252
1253 table = rt->rt6i_table;
1254 write_lock_bh(&table->tb6_lock);
1255
1256 err = fib6_del(rt, info);
1257 dst_release(&rt->u.dst);
1258
1259 write_unlock_bh(&table->tb6_lock);
1260
1261 return err;
1262 }
1263
1264 int ip6_del_rt(struct rt6_info *rt)
1265 {
1266 return __ip6_del_rt(rt, NULL);
1267 }
1268
1269 static int ip6_route_del(struct fib6_config *cfg)
1270 {
1271 struct fib6_table *table;
1272 struct fib6_node *fn;
1273 struct rt6_info *rt;
1274 int err = -ESRCH;
1275
1276 table = fib6_get_table(cfg->fc_table);
1277 if (table == NULL)
1278 return err;
1279
1280 read_lock_bh(&table->tb6_lock);
1281
1282 fn = fib6_locate(&table->tb6_root,
1283 &cfg->fc_dst, cfg->fc_dst_len,
1284 &cfg->fc_src, cfg->fc_src_len);
1285
1286 if (fn) {
1287 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1288 if (cfg->fc_ifindex &&
1289 (rt->rt6i_dev == NULL ||
1290 rt->rt6i_dev->ifindex != cfg->fc_ifindex))
1291 continue;
1292 if (cfg->fc_flags & RTF_GATEWAY &&
1293 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1294 continue;
1295 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1296 continue;
1297 dst_hold(&rt->u.dst);
1298 read_unlock_bh(&table->tb6_lock);
1299
1300 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1301 }
1302 }
1303 read_unlock_bh(&table->tb6_lock);
1304
1305 return err;
1306 }
1307
1308 /*
1309 * Handle redirects
1310 */
1311 struct ip6rd_flowi {
1312 struct flowi fl;
1313 struct in6_addr gateway;
1314 };
1315
1316 static struct rt6_info *__ip6_route_redirect(struct fib6_table *table,
1317 struct flowi *fl,
1318 int flags)
1319 {
1320 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl;
1321 struct rt6_info *rt;
1322 struct fib6_node *fn;
1323
1324 /*
1325 * Get the "current" route for this destination and
1326 * check if the redirect has come from an appropriate router.
1327 *
1328 * RFC 2461 specifies that redirects should only be
1329 * accepted if they come from the nexthop to the target.
1330 * Due to the way the routes are chosen, this notion
1331 * is a bit fuzzy and one might need to check all possible
1332 * routes.
1333 */
1334
1335 read_lock_bh(&table->tb6_lock);
1336 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
1337 restart:
1338 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1339 /*
1340 * Current route is on-link; a redirect is always invalid.
1341 *
1342 * It seems the previous statement is not quite true: the target
1343 * could be a node that regards us as on-link (e.g. proxy ndisc),
1344 * but the router serving it might then decide that we should
1345 * learn the truth 8)8) --ANK (980726).
1346 */
1347 if (rt6_check_expired(rt))
1348 continue;
1349 if (!(rt->rt6i_flags & RTF_GATEWAY))
1350 continue;
1351 if (fl->oif != rt->rt6i_dev->ifindex)
1352 continue;
1353 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1354 continue;
1355 break;
1356 }
1357
1358 if (!rt)
1359 rt = &ip6_null_entry;
1360 BACKTRACK(&fl->fl6_src);
1361 out:
1362 dst_hold(&rt->u.dst);
1363
1364 read_unlock_bh(&table->tb6_lock);
1365
1366 return rt;
1367 };
1368
1369 static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1370 struct in6_addr *src,
1371 struct in6_addr *gateway,
1372 struct net_device *dev)
1373 {
1374 int flags = RT6_LOOKUP_F_HAS_SADDR;
1375 struct ip6rd_flowi rdfl = {
1376 .fl = {
1377 .oif = dev->ifindex,
1378 .nl_u = {
1379 .ip6_u = {
1380 .daddr = *dest,
1381 .saddr = *src,
1382 },
1383 },
1384 },
1385 .gateway = *gateway,
1386 };
1387
1388 if (rt6_need_strict(dest))
1389 flags |= RT6_LOOKUP_F_IFACE;
1390
1391 return (struct rt6_info *)fib6_rule_lookup((struct flowi *)&rdfl, flags, __ip6_route_redirect);
1392 }
1393
1394 void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
1395 struct in6_addr *saddr,
1396 struct neighbour *neigh, u8 *lladdr, int on_link)
1397 {
1398 struct rt6_info *rt, *nrt = NULL;
1399 struct netevent_redirect netevent;
1400
1401 rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
1402
1403 if (rt == &ip6_null_entry) {
1404 if (net_ratelimit())
1405 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
1406 "for redirect target\n");
1407 goto out;
1408 }
1409
1410 /*
1411 * We have finally decided to accept it.
1412 */
1413
1414 neigh_update(neigh, lladdr, NUD_STALE,
1415 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1416 NEIGH_UPDATE_F_OVERRIDE|
1417 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1418 NEIGH_UPDATE_F_ISROUTER))
1419 );
1420
1421 /*
1422 * Redirect received -> path was valid.
1423 * Look, redirects are sent only in response to data packets,
1424 * so that this nexthop apparently is reachable. --ANK
1425 */
1426 dst_confirm(&rt->u.dst);
1427
1428 /* Duplicate redirect: silently ignore. */
1429 if (neigh == rt->u.dst.neighbour)
1430 goto out;
1431
1432 nrt = ip6_rt_copy(rt);
1433 if (nrt == NULL)
1434 goto out;
1435
1436 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1437 if (on_link)
1438 nrt->rt6i_flags &= ~RTF_GATEWAY;
1439
1440 ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
1441 nrt->rt6i_dst.plen = 128;
1442 nrt->u.dst.flags |= DST_HOST;
1443
1444 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
1445 nrt->rt6i_nexthop = neigh_clone(neigh);
1446 /* Reset the PMTU; the new path may allow a larger one */
1447 nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
1448 nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&nrt->u.dst));
1449
1450 if (ip6_ins_rt(nrt))
1451 goto out;
1452
1453 netevent.old = &rt->u.dst;
1454 netevent.new = &nrt->u.dst;
1455 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1456
1457 if (rt->rt6i_flags&RTF_CACHE) {
1458 ip6_del_rt(rt);
1459 return;
1460 }
1461
1462 out:
1463 dst_release(&rt->u.dst);
1464 return;
1465 }
1466
1467 /*
1468 * Handle ICMP "packet too big" messages
1469 * i.e. Path MTU discovery
1470 */
1471
1472 void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1473 struct net_device *dev, u32 pmtu)
1474 {
1475 struct rt6_info *rt, *nrt;
1476 int allfrag = 0;
1477
1478 rt = rt6_lookup(daddr, saddr, dev->ifindex, 0);
1479 if (rt == NULL)
1480 return;
1481
1482 if (pmtu >= dst_mtu(&rt->u.dst))
1483 goto out;
1484
1485 if (pmtu < IPV6_MIN_MTU) {
1486 /*
1487 * According to RFC 2460, once a node receives a Packet Too Big
1488 * message reporting a PMTU below the IPv6 minimum link MTU (1280),
1489 * the PMTU is set to the minimum link MTU and a fragment header
1490 * should always be included from then on.
1491 */
1492 pmtu = IPV6_MIN_MTU;
1493 allfrag = 1;
1494 }
1495
1496 /* New MTU received -> the path was valid.
1497 Too Big messages are sent only in response to data packets,
1498 so this nexthop apparently is reachable. --ANK
1499 */
1500 dst_confirm(&rt->u.dst);
1501
1502 /* Host route. If it is static, it would be better
1503 not to override it but to add a new one, so that
1504 the old PMTU returns automatically when the cache
1505 entry expires.
1506 */
1507 if (rt->rt6i_flags & RTF_CACHE) {
1508 rt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1509 if (allfrag)
1510 rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1511 dst_set_expires(&rt->u.dst, ip6_rt_mtu_expires);
1512 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
1513 goto out;
1514 }
1515
1516 /* Network route.
1517 Two cases are possible:
1518 1. It is a connected route. Action: COW.
1519 2. It is a gatewayed or NONEXTHOP route. Action: clone it.
1520 */
1521 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
1522 nrt = rt6_alloc_cow(rt, daddr, saddr);
1523 else
1524 nrt = rt6_alloc_clone(rt, daddr);
1525
1526 if (nrt) {
1527 nrt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1528 if (allfrag)
1529 nrt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1530
1531 /* According to RFC 1981, probing for a PMTU increase should not
1532 * happen within 5 minutes; the recommended timer is 10 minutes.
1533 * Here the route expiration time is set to ip6_rt_mtu_expires,
1534 * which is 10 minutes. After it expires the decreased PMTU is
1535 * discarded and a PMTU increase is detected automatically.
1536 */
1537 dst_set_expires(&nrt->u.dst, ip6_rt_mtu_expires);
1538 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
1539
1540 ip6_ins_rt(nrt);
1541 }
1542 out:
1543 dst_release(&rt->u.dst);
1544 }
1545
1546 /*
1547 * Misc support functions
1548 */
1549
1550 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1551 {
1552 struct rt6_info *rt = ip6_dst_alloc();
1553
1554 if (rt) {
1555 rt->u.dst.input = ort->u.dst.input;
1556 rt->u.dst.output = ort->u.dst.output;
1557
1558 memcpy(rt->u.dst.metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
1559 rt->u.dst.error = ort->u.dst.error;
1560 rt->u.dst.dev = ort->u.dst.dev;
1561 if (rt->u.dst.dev)
1562 dev_hold(rt->u.dst.dev);
1563 rt->rt6i_idev = ort->rt6i_idev;
1564 if (rt->rt6i_idev)
1565 in6_dev_hold(rt->rt6i_idev);
1566 rt->u.dst.lastuse = jiffies;
1567 rt->rt6i_expires = 0;
1568
1569 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
1570 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
1571 rt->rt6i_metric = 0;
1572
1573 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1574 #ifdef CONFIG_IPV6_SUBTREES
1575 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1576 #endif
1577 rt->rt6i_table = ort->rt6i_table;
1578 }
1579 return rt;
1580 }
1581
1582 #ifdef CONFIG_IPV6_ROUTE_INFO
1583 static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixlen,
1584 struct in6_addr *gwaddr, int ifindex)
1585 {
1586 struct fib6_node *fn;
1587 struct rt6_info *rt = NULL;
1588 struct fib6_table *table;
1589
1590 table = fib6_get_table(RT6_TABLE_INFO);
1591 if (table == NULL)
1592 return NULL;
1593
1594 write_lock_bh(&table->tb6_lock);
1595 fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
1596 if (!fn)
1597 goto out;
1598
1599 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1600 if (rt->rt6i_dev->ifindex != ifindex)
1601 continue;
1602 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1603 continue;
1604 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1605 continue;
1606 dst_hold(&rt->u.dst);
1607 break;
1608 }
1609 out:
1610 write_unlock_bh(&table->tb6_lock);
1611 return rt;
1612 }
1613
1614 static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixlen,
1615 struct in6_addr *gwaddr, int ifindex,
1616 unsigned pref)
1617 {
1618 struct fib6_config cfg = {
1619 .fc_table = RT6_TABLE_INFO,
1620 .fc_metric = 1024,
1621 .fc_ifindex = ifindex,
1622 .fc_dst_len = prefixlen,
1623 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1624 RTF_UP | RTF_PREF(pref),
1625 };
1626
1627 ipv6_addr_copy(&cfg.fc_dst, prefix);
1628 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1629
1630 /* We should treat it as a default route if prefix length is 0. */
1631 if (!prefixlen)
1632 cfg.fc_flags |= RTF_DEFAULT;
1633
1634 ip6_route_add(&cfg);
1635
1636 return rt6_get_route_info(prefix, prefixlen, gwaddr, ifindex);
1637 }
1638 #endif
1639
1640 struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *dev)
1641 {
1642 struct rt6_info *rt;
1643 struct fib6_table *table;
1644
1645 table = fib6_get_table(RT6_TABLE_DFLT);
1646 if (table == NULL)
1647 return NULL;
1648
1649 write_lock_bh(&table->tb6_lock);
1650 for (rt = table->tb6_root.leaf; rt; rt=rt->u.dst.rt6_next) {
1651 if (dev == rt->rt6i_dev &&
1652 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1653 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1654 break;
1655 }
1656 if (rt)
1657 dst_hold(&rt->u.dst);
1658 write_unlock_bh(&table->tb6_lock);
1659 return rt;
1660 }
1661
1662 EXPORT_SYMBOL(rt6_get_dflt_router);
1663
1664 struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
1665 struct net_device *dev,
1666 unsigned int pref)
1667 {
1668 struct fib6_config cfg = {
1669 .fc_table = RT6_TABLE_DFLT,
1670 .fc_metric = 1024,
1671 .fc_ifindex = dev->ifindex,
1672 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1673 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1674 };
1675
1676 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1677
1678 ip6_route_add(&cfg);
1679
1680 return rt6_get_dflt_router(gwaddr, dev);
1681 }
1682
1683 void rt6_purge_dflt_routers(void)
1684 {
1685 struct rt6_info *rt;
1686 struct fib6_table *table;
1687
1688 /* NOTE: Keep consistent with rt6_get_dflt_router */
1689 table = fib6_get_table(RT6_TABLE_DFLT);
1690 if (table == NULL)
1691 return;
1692
1693 restart:
1694 read_lock_bh(&table->tb6_lock);
1695 for (rt = table->tb6_root.leaf; rt; rt = rt->u.dst.rt6_next) {
1696 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
1697 dst_hold(&rt->u.dst);
1698 read_unlock_bh(&table->tb6_lock);
1699 ip6_del_rt(rt);
1700 goto restart;
1701 }
1702 }
1703 read_unlock_bh(&table->tb6_lock);
1704 }
1705
1706 static void rtmsg_to_fib6_config(struct in6_rtmsg *rtmsg,
1707 struct fib6_config *cfg)
1708 {
1709 memset(cfg, 0, sizeof(*cfg));
1710
1711 cfg->fc_table = RT6_TABLE_MAIN;
1712 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
1713 cfg->fc_metric = rtmsg->rtmsg_metric;
1714 cfg->fc_expires = rtmsg->rtmsg_info;
1715 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
1716 cfg->fc_src_len = rtmsg->rtmsg_src_len;
1717 cfg->fc_flags = rtmsg->rtmsg_flags;
1718
1719 ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
1720 ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
1721 ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
1722 }
1723
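/*
 * SIOCADDRT/SIOCDELRT ioctl entry point: copy the in6_rtmsg from
 * userspace, convert it to a fib6_config and add or delete the route
 * under the RTNL lock.
 */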
1724 int ipv6_route_ioctl(unsigned int cmd, void __user *arg)
1725 {
1726 struct fib6_config cfg;
1727 struct in6_rtmsg rtmsg;
1728 int err;
1729
1730 switch(cmd) {
1731 case SIOCADDRT: /* Add a route */
1732 case SIOCDELRT: /* Delete a route */
1733 if (!capable(CAP_NET_ADMIN))
1734 return -EPERM;
1735 err = copy_from_user(&rtmsg, arg,
1736 sizeof(struct in6_rtmsg));
1737 if (err)
1738 return -EFAULT;
1739
1740 rtmsg_to_fib6_config(&rtmsg, &cfg);
1741
1742 rtnl_lock();
1743 switch (cmd) {
1744 case SIOCADDRT:
1745 err = ip6_route_add(&cfg);
1746 break;
1747 case SIOCDELRT:
1748 err = ip6_route_del(&cfg);
1749 break;
1750 default:
1751 err = -EINVAL;
1752 }
1753 rtnl_unlock();
1754
1755 return err;
1756 }
1757
1758 return -EINVAL;
1759 }
1760
1761 /*
1762 * Drop the packet on the floor
1763 */
1764
1765 static inline int ip6_pkt_drop(struct sk_buff *skb, int code,
1766 int ipstats_mib_noroutes)
1767 {
1768 int type;
1769 switch (ipstats_mib_noroutes) {
1770 case IPSTATS_MIB_INNOROUTES:
1771 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
1772 if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED) {
1773 IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INADDRERRORS);
1774 break;
1775 }
1776 /* FALLTHROUGH */
1777 case IPSTATS_MIB_OUTNOROUTES:
1778 IP6_INC_STATS(ip6_dst_idev(skb->dst), ipstats_mib_noroutes);
1779 break;
1780 }
1781 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0, skb->dev);
1782 kfree_skb(skb);
1783 return 0;
1784 }
1785
1786 static int ip6_pkt_discard(struct sk_buff *skb)
1787 {
1788 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
1789 }
1790
1791 static int ip6_pkt_discard_out(struct sk_buff *skb)
1792 {
1793 skb->dev = skb->dst->dev;
1794 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
1795 }
1796
1797 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
1798
1799 static int ip6_pkt_prohibit(struct sk_buff *skb)
1800 {
1801 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
1802 }
1803
1804 static int ip6_pkt_prohibit_out(struct sk_buff *skb)
1805 {
1806 skb->dev = skb->dst->dev;
1807 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
1808 }
1809
1810 #endif
1811
1812 /*
1813 * Allocate a dst for local (unicast / anycast) address.
1814 */
1815
1816 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1817 const struct in6_addr *addr,
1818 int anycast)
1819 {
1820 struct rt6_info *rt = ip6_dst_alloc();
1821
1822 if (rt == NULL)
1823 return ERR_PTR(-ENOMEM);
1824
1825 dev_hold(init_net.loopback_dev);
1826 in6_dev_hold(idev);
1827
1828 rt->u.dst.flags = DST_HOST;
1829 rt->u.dst.input = ip6_input;
1830 rt->u.dst.output = ip6_output;
1831 rt->rt6i_dev = init_net.loopback_dev;
1832 rt->rt6i_idev = idev;
1833 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
1834 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
1835 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1836 rt->u.dst.obsolete = -1;
1837
1838 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
1839 if (anycast)
1840 rt->rt6i_flags |= RTF_ANYCAST;
1841 else
1842 rt->rt6i_flags |= RTF_LOCAL;
1843 rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
1844 if (rt->rt6i_nexthop == NULL) {
1845 dst_free(&rt->u.dst);
1846 return ERR_PTR(-ENOMEM);
1847 }
1848
1849 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1850 rt->rt6i_dst.plen = 128;
1851 rt->rt6i_table = fib6_get_table(RT6_TABLE_LOCAL);
1852
1853 atomic_set(&rt->u.dst.__refcnt, 1);
1854
1855 return rt;
1856 }
1857
1858 static int fib6_ifdown(struct rt6_info *rt, void *arg)
1859 {
1860 if (((void*)rt->rt6i_dev == arg || arg == NULL) &&
1861 rt != &ip6_null_entry) {
1862 RT6_TRACE("deleted by ifdown %p\n", rt);
1863 return -1;
1864 }
1865 return 0;
1866 }
1867
1868 void rt6_ifdown(struct net_device *dev)
1869 {
1870 fib6_clean_all(fib6_ifdown, 0, dev);
1871 }
1872
1873 struct rt6_mtu_change_arg
1874 {
1875 struct net_device *dev;
1876 unsigned mtu;
1877 };
1878
1879 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
1880 {
1881 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
1882 struct inet6_dev *idev;
1883
1884 /* In IPv6, PMTU discovery is not optional,
1885 so the RTAX_MTU lock cannot disable it.
1886 We still use this lock to block changes
1887 caused by addrconf/ndisc.
1888 */
1889
1890 idev = __in6_dev_get(arg->dev);
1891 if (idev == NULL)
1892 return 0;
1893
1894 /* For an administrative MTU increase there is no way to discover
1895 an IPv6 PMTU increase, so it has to be applied here.
1896 Since RFC 1981 doesn't cover administrative MTU increases,
1897 updating for the increase is a MUST (e.g. jumbo frames).
1898 */
1899 /*
1900 If the new MTU is less than the route PMTU, the new MTU will be
1901 the lowest MTU in the path; update the route PMTU to reflect the
1902 decrease. If the new MTU is greater than the route PMTU and the
1903 old MTU was the lowest MTU in the path, update the route PMTU to
1904 reflect the increase. In that case, if another node's MTU is now
1905 the lowest in the path, a Too Big message will trigger PMTU
1906 discovery again.
1907 */
1908 if (rt->rt6i_dev == arg->dev &&
1909 !dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
1910 (dst_mtu(&rt->u.dst) > arg->mtu ||
1911 (dst_mtu(&rt->u.dst) < arg->mtu &&
1912 dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) {
1913 rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
1914 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(arg->mtu);
1915 }
1916 return 0;
1917 }
1918
1919 void rt6_mtu_change(struct net_device *dev, unsigned mtu)
1920 {
1921 struct rt6_mtu_change_arg arg = {
1922 .dev = dev,
1923 .mtu = mtu,
1924 };
1925
1926 fib6_clean_all(rt6_mtu_change_route, 0, &arg);
1927 }
1928
1929 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
1930 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
1931 [RTA_OIF] = { .type = NLA_U32 },
1932 [RTA_IIF] = { .type = NLA_U32 },
1933 [RTA_PRIORITY] = { .type = NLA_U32 },
1934 [RTA_METRICS] = { .type = NLA_NESTED },
1935 };
1936
1937 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
1938 struct fib6_config *cfg)
1939 {
1940 struct rtmsg *rtm;
1941 struct nlattr *tb[RTA_MAX+1];
1942 int err;
1943
1944 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
1945 if (err < 0)
1946 goto errout;
1947
1948 err = -EINVAL;
1949 rtm = nlmsg_data(nlh);
1950 memset(cfg, 0, sizeof(*cfg));
1951
1952 cfg->fc_table = rtm->rtm_table;
1953 cfg->fc_dst_len = rtm->rtm_dst_len;
1954 cfg->fc_src_len = rtm->rtm_src_len;
1955 cfg->fc_flags = RTF_UP;
1956 cfg->fc_protocol = rtm->rtm_protocol;
1957
1958 if (rtm->rtm_type == RTN_UNREACHABLE)
1959 cfg->fc_flags |= RTF_REJECT;
1960
1961 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
1962 cfg->fc_nlinfo.nlh = nlh;
1963
1964 if (tb[RTA_GATEWAY]) {
1965 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
1966 cfg->fc_flags |= RTF_GATEWAY;
1967 }
1968
1969 if (tb[RTA_DST]) {
1970 int plen = (rtm->rtm_dst_len + 7) >> 3;
1971
1972 if (nla_len(tb[RTA_DST]) < plen)
1973 goto errout;
1974
1975 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
1976 }
1977
1978 if (tb[RTA_SRC]) {
1979 int plen = (rtm->rtm_src_len + 7) >> 3;
1980
1981 if (nla_len(tb[RTA_SRC]) < plen)
1982 goto errout;
1983
1984 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
1985 }
1986
1987 if (tb[RTA_OIF])
1988 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
1989
1990 if (tb[RTA_PRIORITY])
1991 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
1992
1993 if (tb[RTA_METRICS]) {
1994 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
1995 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
1996 }
1997
1998 if (tb[RTA_TABLE])
1999 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2000
2001 err = 0;
2002 errout:
2003 return err;
2004 }
2005
2006 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2007 {
2008 struct net *net = skb->sk->sk_net;
2009 struct fib6_config cfg;
2010 int err;
2011
2012 if (net != &init_net)
2013 return -EINVAL;
2014
2015 err = rtm_to_fib6_config(skb, nlh, &cfg);
2016 if (err < 0)
2017 return err;
2018
2019 return ip6_route_del(&cfg);
2020 }
2021
2022 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2023 {
2024 struct net *net = skb->sk->sk_net;
2025 struct fib6_config cfg;
2026 int err;
2027
2028 if (net != &init_net)
2029 return -EINVAL;
2030
2031 err = rtm_to_fib6_config(skb, nlh, &cfg);
2032 if (err < 0)
2033 return err;
2034
2035 return ip6_route_add(&cfg);
2036 }
2037
2038 static inline size_t rt6_nlmsg_size(void)
2039 {
2040 return NLMSG_ALIGN(sizeof(struct rtmsg))
2041 + nla_total_size(16) /* RTA_SRC */
2042 + nla_total_size(16) /* RTA_DST */
2043 + nla_total_size(16) /* RTA_GATEWAY */
2044 + nla_total_size(16) /* RTA_PREFSRC */
2045 + nla_total_size(4) /* RTA_TABLE */
2046 + nla_total_size(4) /* RTA_IIF */
2047 + nla_total_size(4) /* RTA_OIF */
2048 + nla_total_size(4) /* RTA_PRIORITY */
2049 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2050 + nla_total_size(sizeof(struct rta_cacheinfo));
2051 }
2052
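/*
 * Encode one rt6_info as an rtnetlink route message. When 'prefix' is
 * set, routes without RTF_PREFIX_RT are skipped (returning 1, which
 * the dump path treats as success).
 */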
2053 static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
2054 struct in6_addr *dst, struct in6_addr *src,
2055 int iif, int type, u32 pid, u32 seq,
2056 int prefix, unsigned int flags)
2057 {
2058 struct rtmsg *rtm;
2059 struct nlmsghdr *nlh;
2060 long expires;
2061 u32 table;
2062
2063 if (prefix) { /* user wants prefix routes only */
2064 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2065 /* success since this is not a prefix route */
2066 return 1;
2067 }
2068 }
2069
2070 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
2071 if (nlh == NULL)
2072 return -EMSGSIZE;
2073
2074 rtm = nlmsg_data(nlh);
2075 rtm->rtm_family = AF_INET6;
2076 rtm->rtm_dst_len = rt->rt6i_dst.plen;
2077 rtm->rtm_src_len = rt->rt6i_src.plen;
2078 rtm->rtm_tos = 0;
2079 if (rt->rt6i_table)
2080 table = rt->rt6i_table->tb6_id;
2081 else
2082 table = RT6_TABLE_UNSPEC;
2083 rtm->rtm_table = table;
2084 NLA_PUT_U32(skb, RTA_TABLE, table);
2085 if (rt->rt6i_flags&RTF_REJECT)
2086 rtm->rtm_type = RTN_UNREACHABLE;
2087 else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
2088 rtm->rtm_type = RTN_LOCAL;
2089 else
2090 rtm->rtm_type = RTN_UNICAST;
2091 rtm->rtm_flags = 0;
2092 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2093 rtm->rtm_protocol = rt->rt6i_protocol;
2094 if (rt->rt6i_flags&RTF_DYNAMIC)
2095 rtm->rtm_protocol = RTPROT_REDIRECT;
2096 else if (rt->rt6i_flags & RTF_ADDRCONF)
2097 rtm->rtm_protocol = RTPROT_KERNEL;
2098 else if (rt->rt6i_flags&RTF_DEFAULT)
2099 rtm->rtm_protocol = RTPROT_RA;
2100
2101 if (rt->rt6i_flags&RTF_CACHE)
2102 rtm->rtm_flags |= RTM_F_CLONED;
2103
2104 if (dst) {
2105 NLA_PUT(skb, RTA_DST, 16, dst);
2106 rtm->rtm_dst_len = 128;
2107 } else if (rtm->rtm_dst_len)
2108 NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
2109 #ifdef CONFIG_IPV6_SUBTREES
2110 if (src) {
2111 NLA_PUT(skb, RTA_SRC, 16, src);
2112 rtm->rtm_src_len = 128;
2113 } else if (rtm->rtm_src_len)
2114 NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
2115 #endif
2116 if (iif)
2117 NLA_PUT_U32(skb, RTA_IIF, iif);
2118 else if (dst) {
2119 struct in6_addr saddr_buf;
2120 if (ipv6_get_saddr(&rt->u.dst, dst, &saddr_buf) == 0)
2121 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2122 }
2123
2124 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
2125 goto nla_put_failure;
2126
2127 if (rt->u.dst.neighbour)
2128 NLA_PUT(skb, RTA_GATEWAY, 16, &rt->u.dst.neighbour->primary_key);
2129
2130 if (rt->u.dst.dev)
2131 NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
2132
2133 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
2134
2135 expires = rt->rt6i_expires ? rt->rt6i_expires - jiffies : 0;
2136 if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0,
2137 expires, rt->u.dst.error) < 0)
2138 goto nla_put_failure;
2139
2140 return nlmsg_end(skb, nlh);
2141
2142 nla_put_failure:
2143 nlmsg_cancel(skb, nlh);
2144 return -EMSGSIZE;
2145 }
2146
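/*
 * fib6 tree-walk callback for RTM_GETROUTE dump requests; honours the
 * RTM_F_PREFIX filter carried in the request header, if one was supplied.
 */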
2147 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2148 {
2149 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2150 int prefix;
2151
2152 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2153 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2154 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2155 } else
2156 prefix = 0;
2157
2158 return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2159 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
2160 prefix, NLM_F_MULTI);
2161 }
2162
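/*
 * Handle a unicast RTM_GETROUTE query: build a flow from the supplied
 * attributes, perform a route lookup and send a single RTM_NEWROUTE reply
 * back to the requesting socket.
 */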
2163 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2164 {
2165 struct net *net = in_skb->sk->sk_net;
2166 struct nlattr *tb[RTA_MAX+1];
2167 struct rt6_info *rt;
2168 struct sk_buff *skb;
2169 struct rtmsg *rtm;
2170 struct flowi fl;
2171 int err, iif = 0;
2172
2173 if (net != &init_net)
2174 return -EINVAL;
2175
2176 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2177 if (err < 0)
2178 goto errout;
2179
2180 err = -EINVAL;
2181 memset(&fl, 0, sizeof(fl));
2182
2183 if (tb[RTA_SRC]) {
2184 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2185 goto errout;
2186
2187 ipv6_addr_copy(&fl.fl6_src, nla_data(tb[RTA_SRC]));
2188 }
2189
2190 if (tb[RTA_DST]) {
2191 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2192 goto errout;
2193
2194 ipv6_addr_copy(&fl.fl6_dst, nla_data(tb[RTA_DST]));
2195 }
2196
2197 if (tb[RTA_IIF])
2198 iif = nla_get_u32(tb[RTA_IIF]);
2199
2200 if (tb[RTA_OIF])
2201 fl.oif = nla_get_u32(tb[RTA_OIF]);
2202
2203 if (iif) {
2204 struct net_device *dev;
2205 dev = __dev_get_by_index(&init_net, iif);
2206 if (!dev) {
2207 err = -ENODEV;
2208 goto errout;
2209 }
2210 }
2211
2212 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2213 if (skb == NULL) {
2214 err = -ENOBUFS;
2215 goto errout;
2216 }
2217
2218 /* Reserve room for dummy headers; this skb can pass
2219 through a good chunk of the routing engine.
2220 */
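This filler should not appear.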
2221 skb_reset_mac_header(skb);
2222 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2223
2224 rt = (struct rt6_info*) ip6_route_output(NULL, &fl);
2225 skb->dst = &rt->u.dst;
2226
2227 err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
2228 RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
2229 nlh->nlmsg_seq, 0, 0);
2230 if (err < 0) {
2231 kfree_skb(skb);
2232 goto errout;
2233 }
2234
2235 err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid);
2236 errout:
2237 return err;
2238 }
2239
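/*
 * Notify RTNLGRP_IPV6_ROUTE listeners about a route change; @event is
 * RTM_NEWROUTE or RTM_DELROUTE.
 */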
2240 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2241 {
2242 struct sk_buff *skb;
2243 u32 pid = 0, seq = 0;
2244 struct nlmsghdr *nlh = NULL;
2245 int err = -ENOBUFS;
2246
2247 if (info) {
2248 pid = info->pid;
2249 nlh = info->nlh;
2250 if (nlh)
2251 seq = nlh->nlmsg_seq;
2252 }
2253
2254 skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2255 if (skb == NULL)
2256 goto errout;
2257
2258 err = rt6_fill_node(skb, rt, NULL, NULL, 0, event, pid, seq, 0, 0);
2259 if (err < 0) {
2260 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2261 WARN_ON(err == -EMSGSIZE);
2262 kfree_skb(skb);
2263 goto errout;
2264 }
2265 err = rtnl_notify(skb, &init_net, pid, RTNLGRP_IPV6_ROUTE, nlh, gfp_any());
2266 errout:
2267 if (err < 0)
2268 rtnl_set_sk_err(&init_net, RTNLGRP_IPV6_ROUTE, err);
2269 }
2270
2271 /*
2272 * /proc
2273 */
2274
2275 #ifdef CONFIG_PROC_FS
2276
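/*
 * RT6_INFO_LEN and struct rt6_proc_arg appear to be leftovers from the
 * old read_proc based /proc interface; nothing in the code below uses them.
 */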
2277 #define RT6_INFO_LEN (32 + 4 + 32 + 4 + 32 + 40 + 5 + 1)
2278
2279 struct rt6_proc_arg
2280 {
2281 char *buffer;
2282 int offset;
2283 int length;
2284 int skip;
2285 int len;
2286 };
2287
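/*
 * Emit one /proc/net/ipv6_route line for @rt: destination and source
 * prefixes, next hop, metric, refcount, use count, flags and device name.
 */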
2288 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2289 {
2290 struct seq_file *m = p_arg;
2291
2292 seq_printf(m, NIP6_SEQFMT " %02x ", NIP6(rt->rt6i_dst.addr),
2293 rt->rt6i_dst.plen);
2294
2295 #ifdef CONFIG_IPV6_SUBTREES
2296 seq_printf(m, NIP6_SEQFMT " %02x ", NIP6(rt->rt6i_src.addr),
2297 rt->rt6i_src.plen);
2298 #else
2299 seq_puts(m, "00000000000000000000000000000000 00 ");
2300 #endif
2301
2302 if (rt->rt6i_nexthop) {
2303 seq_printf(m, NIP6_SEQFMT,
2304 NIP6(*((struct in6_addr *)rt->rt6i_nexthop->primary_key)));
2305 } else {
2306 seq_puts(m, "00000000000000000000000000000000");
2307 }
2308 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2309 rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt),
2310 rt->u.dst.__use, rt->rt6i_flags,
2311 rt->rt6i_dev ? rt->rt6i_dev->name : "");
2312 return 0;
2313 }
2314
2315 static int ipv6_route_show(struct seq_file *m, void *v)
2316 {
2317 fib6_clean_all(rt6_info_route, 0, m);
2318 return 0;
2319 }
2320
2321 static int ipv6_route_open(struct inode *inode, struct file *file)
2322 {
2323 return single_open(file, ipv6_route_show, NULL);
2324 }
2325
2326 static const struct file_operations ipv6_route_proc_fops = {
2327 .owner = THIS_MODULE,
2328 .open = ipv6_route_open,
2329 .read = seq_read,
2330 .llseek = seq_lseek,
2331 .release = single_release,
2332 };
2333
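/*
 * /proc/net/rt6_stats: fib nodes, route nodes, route allocations, route
 * entries, cached routes, dst entries in use and discarded routes, in hex.
 */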
2334 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2335 {
2336 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2337 rt6_stats.fib_nodes, rt6_stats.fib_route_nodes,
2338 rt6_stats.fib_rt_alloc, rt6_stats.fib_rt_entries,
2339 rt6_stats.fib_rt_cache,
2340 atomic_read(&ip6_dst_ops.entries),
2341 rt6_stats.fib_discarded_routes);
2342
2343 return 0;
2344 }
2345
2346 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2347 {
2348 return single_open(file, rt6_stats_seq_show, NULL);
2349 }
2350
2351 static const struct file_operations rt6_stats_seq_fops = {
2352 .owner = THIS_MODULE,
2353 .open = rt6_stats_seq_open,
2354 .read = seq_read,
2355 .llseek = seq_lseek,
2356 .release = single_release,
2357 };
2358 #endif /* CONFIG_PROC_FS */
2359
2360 #ifdef CONFIG_SYSCTL
2361
2362 static int flush_delay;
2363
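/*
 * Write-only sysctl (net.ipv6.route.flush): writing a positive value runs
 * the fib6 garbage collector with that timeout; other values request the
 * default behaviour.  Reads return -EINVAL.
 */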
2364 static
2365 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
2366 void __user *buffer, size_t *lenp, loff_t *ppos)
2367 {
2368 if (write) {
2369 proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
2370 fib6_run_gc(flush_delay <= 0 ? ~0UL : (unsigned long)flush_delay);
2371 return 0;
2372 } else
2373 return -EINVAL;
2374 }
2375
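/*
 * Routing sysctls exported under /proc/sys/net/ipv6/route/; apart from
 * "flush" these map directly onto the ip6_rt_* tunables and the dst_ops
 * gc threshold.
 */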
2376 ctl_table ipv6_route_table[] = {
2377 {
2378 .procname = "flush",
2379 .data = &flush_delay,
2380 .maxlen = sizeof(int),
2381 .mode = 0200,
2382 .proc_handler = &ipv6_sysctl_rtcache_flush
2383 },
2384 {
2385 .ctl_name = NET_IPV6_ROUTE_GC_THRESH,
2386 .procname = "gc_thresh",
2387 .data = &ip6_dst_ops.gc_thresh,
2388 .maxlen = sizeof(int),
2389 .mode = 0644,
2390 .proc_handler = &proc_dointvec,
2391 },
2392 {
2393 .ctl_name = NET_IPV6_ROUTE_MAX_SIZE,
2394 .procname = "max_size",
2395 .data = &ip6_rt_max_size,
2396 .maxlen = sizeof(int),
2397 .mode = 0644,
2398 .proc_handler = &proc_dointvec,
2399 },
2400 {
2401 .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL,
2402 .procname = "gc_min_interval",
2403 .data = &ip6_rt_gc_min_interval,
2404 .maxlen = sizeof(int),
2405 .mode = 0644,
2406 .proc_handler = &proc_dointvec_jiffies,
2407 .strategy = &sysctl_jiffies,
2408 },
2409 {
2410 .ctl_name = NET_IPV6_ROUTE_GC_TIMEOUT,
2411 .procname = "gc_timeout",
2412 .data = &ip6_rt_gc_timeout,
2413 .maxlen = sizeof(int),
2414 .mode = 0644,
2415 .proc_handler = &proc_dointvec_jiffies,
2416 .strategy = &sysctl_jiffies,
2417 },
2418 {
2419 .ctl_name = NET_IPV6_ROUTE_GC_INTERVAL,
2420 .procname = "gc_interval",
2421 .data = &ip6_rt_gc_interval,
2422 .maxlen = sizeof(int),
2423 .mode = 0644,
2424 .proc_handler = &proc_dointvec_jiffies,
2425 .strategy = &sysctl_jiffies,
2426 },
2427 {
2428 .ctl_name = NET_IPV6_ROUTE_GC_ELASTICITY,
2429 .procname = "gc_elasticity",
2430 .data = &ip6_rt_gc_elasticity,
2431 .maxlen = sizeof(int),
2432 .mode = 0644,
2433 .proc_handler = &proc_dointvec,
2435 },
2436 {
2437 .ctl_name = NET_IPV6_ROUTE_MTU_EXPIRES,
2438 .procname = "mtu_expires",
2439 .data = &ip6_rt_mtu_expires,
2440 .maxlen = sizeof(int),
2441 .mode = 0644,
2442 .proc_handler = &proc_dointvec_jiffies,
2443 .strategy = &sysctl_jiffies,
2444 },
2445 {
2446 .ctl_name = NET_IPV6_ROUTE_MIN_ADVMSS,
2447 .procname = "min_adv_mss",
2448 .data = &ip6_rt_min_advmss,
2449 .maxlen = sizeof(int),
2450 .mode = 0644,
2451 .proc_handler = &proc_dointvec,
2453 },
2454 {
2455 .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS,
2456 .procname = "gc_min_interval_ms",
2457 .data = &ip6_rt_gc_min_interval,
2458 .maxlen = sizeof(int),
2459 .mode = 0644,
2460 .proc_handler = &proc_dointvec_ms_jiffies,
2461 .strategy = &sysctl_ms_jiffies,
2462 },
2463 { .ctl_name = 0 }
2464 };
2465
2466 #endif
2467
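/*
 * Boot-time initialisation: set up the rt6_info slab cache, the fib6
 * tables, the /proc entries and the rtnetlink route handlers.
 */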
2468 void __init ip6_route_init(void)
2469 {
2470 ip6_dst_ops.kmem_cachep =
2471 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
2472 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2473 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops.kmem_cachep;
2474
2475 fib6_init();
2476 proc_net_fops_create(&init_net, "ipv6_route", 0, &ipv6_route_proc_fops);
2477 proc_net_fops_create(&init_net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2478 #ifdef CONFIG_XFRM
2479 xfrm6_init();
2480 #endif
2481 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2482 fib6_rules_init();
2483 #endif
2484
2485 __rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL);
2486 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL);
2487 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL);
2488 }
2489
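/*
 * Teardown path, undoing ip6_route_init() in reverse order and flushing
 * all remaining routes via rt6_ifdown(NULL).
 */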
2490 void ip6_route_cleanup(void)
2491 {
2492 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2493 fib6_rules_cleanup();
2494 #endif
2495 #ifdef CONFIG_PROC_FS
2496 proc_net_remove(&init_net, "ipv6_route");
2497 proc_net_remove(&init_net, "rt6_stats");
2498 #endif
2499 #ifdef CONFIG_XFRM
2500 xfrm6_fini();
2501 #endif
2502 rt6_ifdown(NULL);
2503 fib6_gc_cleanup();
2504 kmem_cache_destroy(ip6_dst_ops.kmem_cachep);
2505 }