[NET]: Modify all rtnetlink methods to only work in the initial namespace (v2)
net/ipv6/route.c
1 /*
2 * Linux INET6 implementation
3 * FIB front-end.
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * $Id: route.c,v 1.56 2001/10/31 21:55:55 davem Exp $
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16 /* Changes:
17 *
18 * YOSHIFUJI Hideaki @USAGI
19 * reworked default router selection.
20 * - respect outgoing interface
21 * - select from (probably) reachable routers (i.e.
22 * routers in REACHABLE, STALE, DELAY or PROBE states).
23 * - always select the same router if it is (probably)
24 * reachable; otherwise, round-robin the list.
25 * Ville Nuorvala
26 * Fixed routing subtrees.
27 */
28
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/types.h>
32 #include <linux/times.h>
33 #include <linux/socket.h>
34 #include <linux/sockios.h>
35 #include <linux/net.h>
36 #include <linux/route.h>
37 #include <linux/netdevice.h>
38 #include <linux/in6.h>
39 #include <linux/init.h>
40 #include <linux/if_arp.h>
41 #include <linux/proc_fs.h>
42 #include <linux/seq_file.h>
43 #include <net/net_namespace.h>
44 #include <net/snmp.h>
45 #include <net/ipv6.h>
46 #include <net/ip6_fib.h>
47 #include <net/ip6_route.h>
48 #include <net/ndisc.h>
49 #include <net/addrconf.h>
50 #include <net/tcp.h>
51 #include <linux/rtnetlink.h>
52 #include <net/dst.h>
53 #include <net/xfrm.h>
54 #include <net/netevent.h>
55 #include <net/netlink.h>
56
57 #include <asm/uaccess.h>
58
59 #ifdef CONFIG_SYSCTL
60 #include <linux/sysctl.h>
61 #endif
62
63 /* Set to 3 to get tracing. */
64 #define RT6_DEBUG 2
65
66 #if RT6_DEBUG >= 3
67 #define RDBG(x) printk x
68 #define RT6_TRACE(x...) printk(KERN_DEBUG x)
69 #else
70 #define RDBG(x)
71 #define RT6_TRACE(x...) do { ; } while (0)
72 #endif
73
74 #define CLONE_OFFLINK_ROUTE 0
75
76 static int ip6_rt_max_size = 4096;
77 static int ip6_rt_gc_min_interval = HZ / 2;
78 static int ip6_rt_gc_timeout = 60*HZ;
79 int ip6_rt_gc_interval = 30*HZ;
80 static int ip6_rt_gc_elasticity = 9;
81 static int ip6_rt_mtu_expires = 10*60*HZ;
82 static int ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
83
84 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
85 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
86 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
87 static void ip6_dst_destroy(struct dst_entry *);
88 static void ip6_dst_ifdown(struct dst_entry *,
89 struct net_device *dev, int how);
90 static int ip6_dst_gc(void);
91
92 static int ip6_pkt_discard(struct sk_buff *skb);
93 static int ip6_pkt_discard_out(struct sk_buff *skb);
94 static void ip6_link_failure(struct sk_buff *skb);
95 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
96
97 #ifdef CONFIG_IPV6_ROUTE_INFO
98 static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixlen,
99 struct in6_addr *gwaddr, int ifindex,
100 unsigned pref);
101 static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixlen,
102 struct in6_addr *gwaddr, int ifindex);
103 #endif
104
105 static struct dst_ops ip6_dst_ops = {
106 .family = AF_INET6,
107 .protocol = __constant_htons(ETH_P_IPV6),
108 .gc = ip6_dst_gc,
109 .gc_thresh = 1024,
110 .check = ip6_dst_check,
111 .destroy = ip6_dst_destroy,
112 .ifdown = ip6_dst_ifdown,
113 .negative_advice = ip6_negative_advice,
114 .link_failure = ip6_link_failure,
115 .update_pmtu = ip6_rt_update_pmtu,
116 .local_out = ip6_local_out,
117 .entry_size = sizeof(struct rt6_info),
118 };
119
120 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
121 {
122 }
123
124 static struct dst_ops ip6_dst_blackhole_ops = {
125 .family = AF_INET6,
126 .protocol = __constant_htons(ETH_P_IPV6),
127 .destroy = ip6_dst_destroy,
128 .check = ip6_dst_check,
129 .update_pmtu = ip6_rt_blackhole_update_pmtu,
130 .entry_size = sizeof(struct rt6_info),
131 };
132
133 struct rt6_info ip6_null_entry = {
134 .u = {
135 .dst = {
136 .__refcnt = ATOMIC_INIT(1),
137 .__use = 1,
138 .obsolete = -1,
139 .error = -ENETUNREACH,
140 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
141 .input = ip6_pkt_discard,
142 .output = ip6_pkt_discard_out,
143 .ops = &ip6_dst_ops,
144 .path = (struct dst_entry*)&ip6_null_entry,
145 }
146 },
147 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
148 .rt6i_metric = ~(u32) 0,
149 .rt6i_ref = ATOMIC_INIT(1),
150 };
151
152 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
153
154 static int ip6_pkt_prohibit(struct sk_buff *skb);
155 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
156
157 struct rt6_info ip6_prohibit_entry = {
158 .u = {
159 .dst = {
160 .__refcnt = ATOMIC_INIT(1),
161 .__use = 1,
162 .obsolete = -1,
163 .error = -EACCES,
164 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
165 .input = ip6_pkt_prohibit,
166 .output = ip6_pkt_prohibit_out,
167 .ops = &ip6_dst_ops,
168 .path = (struct dst_entry*)&ip6_prohibit_entry,
169 }
170 },
171 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
172 .rt6i_metric = ~(u32) 0,
173 .rt6i_ref = ATOMIC_INIT(1),
174 };
175
176 struct rt6_info ip6_blk_hole_entry = {
177 .u = {
178 .dst = {
179 .__refcnt = ATOMIC_INIT(1),
180 .__use = 1,
181 .obsolete = -1,
182 .error = -EINVAL,
183 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
184 .input = dst_discard,
185 .output = dst_discard,
186 .ops = &ip6_dst_ops,
187 .path = (struct dst_entry*)&ip6_blk_hole_entry,
188 }
189 },
190 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
191 .rt6i_metric = ~(u32) 0,
192 .rt6i_ref = ATOMIC_INIT(1),
193 };
194
195 #endif
196
197 /* allocate dst with ip6_dst_ops */
198 static __inline__ struct rt6_info *ip6_dst_alloc(void)
199 {
200 return (struct rt6_info *)dst_alloc(&ip6_dst_ops);
201 }
202
203 static void ip6_dst_destroy(struct dst_entry *dst)
204 {
205 struct rt6_info *rt = (struct rt6_info *)dst;
206 struct inet6_dev *idev = rt->rt6i_idev;
207
208 if (idev != NULL) {
209 rt->rt6i_idev = NULL;
210 in6_dev_put(idev);
211 }
212 }
213
214 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
215 int how)
216 {
217 struct rt6_info *rt = (struct rt6_info *)dst;
218 struct inet6_dev *idev = rt->rt6i_idev;
219
220 if (dev != init_net.loopback_dev && idev != NULL && idev->dev == dev) {
221 struct inet6_dev *loopback_idev = in6_dev_get(init_net.loopback_dev);
222 if (loopback_idev != NULL) {
223 rt->rt6i_idev = loopback_idev;
224 in6_dev_put(idev);
225 }
226 }
227 }
228
229 static __inline__ int rt6_check_expired(const struct rt6_info *rt)
230 {
231 return (rt->rt6i_flags & RTF_EXPIRES &&
232 time_after(jiffies, rt->rt6i_expires));
233 }
234
235 static inline int rt6_need_strict(struct in6_addr *daddr)
236 {
237 return (ipv6_addr_type(daddr) &
238 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
239 }
240
241 /*
242 * Route lookup. Any table->tb6_lock is implied.
243 */
244
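/*
 * rt6_device_match - walk the sibling routes in a fib6 leaf and prefer
 * the one whose output device matches @oif.  Loopback routes are kept
 * as a fallback; with a strict lookup and no usable device the null
 * entry is returned instead.
 */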
245 static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt,
246 int oif,
247 int strict)
248 {
249 struct rt6_info *local = NULL;
250 struct rt6_info *sprt;
251
252 if (oif) {
253 for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) {
254 struct net_device *dev = sprt->rt6i_dev;
255 if (dev->ifindex == oif)
256 return sprt;
257 if (dev->flags & IFF_LOOPBACK) {
258 if (sprt->rt6i_idev == NULL ||
259 sprt->rt6i_idev->dev->ifindex != oif) {
260 if (strict && oif)
261 continue;
262 if (local && (!oif ||
263 local->rt6i_idev->dev->ifindex == oif))
264 continue;
265 }
266 local = sprt;
267 }
268 }
269
270 if (local)
271 return local;
272
273 if (strict)
274 return &ip6_null_entry;
275 }
276 return rt;
277 }
278
279 #ifdef CONFIG_IPV6_ROUTER_PREF
280 static void rt6_probe(struct rt6_info *rt)
281 {
282 struct neighbour *neigh = rt ? rt->rt6i_nexthop : NULL;
283 /*
284 * Okay, this does not seem to be appropriate
285 * for now, however, we need to check if it
286 * is really so; aka Router Reachability Probing.
287 *
288 * Router Reachability Probe MUST be rate-limited
289 * to no more than one per minute.
290 */
291 if (!neigh || (neigh->nud_state & NUD_VALID))
292 return;
293 read_lock_bh(&neigh->lock);
294 if (!(neigh->nud_state & NUD_VALID) &&
295 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
296 struct in6_addr mcaddr;
297 struct in6_addr *target;
298
299 neigh->updated = jiffies;
300 read_unlock_bh(&neigh->lock);
301
302 target = (struct in6_addr *)&neigh->primary_key;
303 addrconf_addr_solict_mult(target, &mcaddr);
304 ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
305 } else
306 read_unlock_bh(&neigh->lock);
307 }
308 #else
309 static inline void rt6_probe(struct rt6_info *rt)
310 {
311 return;
312 }
313 #endif
314
315 /*
316 * Default Router Selection (RFC 2461 6.3.6)
317 */
318 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
319 {
320 struct net_device *dev = rt->rt6i_dev;
321 if (!oif || dev->ifindex == oif)
322 return 2;
323 if ((dev->flags & IFF_LOOPBACK) &&
324 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
325 return 1;
326 return 0;
327 }
328
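/*
 * rt6_check_neigh - crude reachability score for the route's nexthop:
 * 2 if the neighbour entry is in a VALID state, 1 if no nexthop is
 * required (non-gateway or RTF_NONEXTHOP) or its state is indeterminate,
 * 0 if there is no neighbour entry (or it has FAILED when router
 * preferences are enabled).
 */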
329 static inline int rt6_check_neigh(struct rt6_info *rt)
330 {
331 struct neighbour *neigh = rt->rt6i_nexthop;
332 int m;
333 if (rt->rt6i_flags & RTF_NONEXTHOP ||
334 !(rt->rt6i_flags & RTF_GATEWAY))
335 m = 1;
336 else if (neigh) {
337 read_lock_bh(&neigh->lock);
338 if (neigh->nud_state & NUD_VALID)
339 m = 2;
340 #ifdef CONFIG_IPV6_ROUTER_PREF
341 else if (neigh->nud_state & NUD_FAILED)
342 m = 0;
343 #endif
344 else
345 m = 1;
346 read_unlock_bh(&neigh->lock);
347 } else
348 m = 0;
349 return m;
350 }
351
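/*
 * rt6_score_route - combine the interface match from rt6_check_dev()
 * with the RA router-preference bits (when CONFIG_IPV6_ROUTER_PREF is
 * set) into a single score; returns -1 if a required interface or
 * reachability constraint is not met.
 */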
352 static int rt6_score_route(struct rt6_info *rt, int oif,
353 int strict)
354 {
355 int m, n;
356
357 m = rt6_check_dev(rt, oif);
358 if (!m && (strict & RT6_LOOKUP_F_IFACE))
359 return -1;
360 #ifdef CONFIG_IPV6_ROUTER_PREF
361 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
362 #endif
363 n = rt6_check_neigh(rt);
364 if (!n && (strict & RT6_LOOKUP_F_REACHABLE))
365 return -1;
366 return m;
367 }
368
369 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
370 int *mpri, struct rt6_info *match)
371 {
372 int m;
373
374 if (rt6_check_expired(rt))
375 goto out;
376
377 m = rt6_score_route(rt, oif, strict);
378 if (m < 0)
379 goto out;
380
381 if (m > *mpri) {
382 if (strict & RT6_LOOKUP_F_REACHABLE)
383 rt6_probe(match);
384 *mpri = m;
385 match = rt;
386 } else if (strict & RT6_LOOKUP_F_REACHABLE) {
387 rt6_probe(rt);
388 }
389
390 out:
391 return match;
392 }
393
394 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
395 struct rt6_info *rr_head,
396 u32 metric, int oif, int strict)
397 {
398 struct rt6_info *rt, *match;
399 int mpri = -1;
400
401 match = NULL;
402 for (rt = rr_head; rt && rt->rt6i_metric == metric;
403 rt = rt->u.dst.rt6_next)
404 match = find_match(rt, oif, strict, &mpri, match);
405 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
406 rt = rt->u.dst.rt6_next)
407 match = find_match(rt, oif, strict, &mpri, match);
408
409 return match;
410 }
411
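/*
 * rt6_select - pick the best route among the equal-metric siblings
 * starting at fn->rr_ptr, scoring each with rt6_score_route().  When a
 * reachable route is required but none qualifies, the round-robin
 * pointer is advanced so the next lookup tries a different router.
 */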
412 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
413 {
414 struct rt6_info *match, *rt0;
415
416 RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
417 __FUNCTION__, fn->leaf, oif);
418
419 rt0 = fn->rr_ptr;
420 if (!rt0)
421 fn->rr_ptr = rt0 = fn->leaf;
422
423 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
424
425 if (!match &&
426 (strict & RT6_LOOKUP_F_REACHABLE)) {
427 struct rt6_info *next = rt0->u.dst.rt6_next;
428
429 /* no entries matched; do round-robin */
430 if (!next || next->rt6i_metric != rt0->rt6i_metric)
431 next = fn->leaf;
432
433 if (next != rt0)
434 fn->rr_ptr = next;
435 }
436
437 RT6_TRACE("%s() => %p\n",
438 __FUNCTION__, match);
439
440 return (match ? match : &ip6_null_entry);
441 }
442
443 #ifdef CONFIG_IPV6_ROUTE_INFO
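/*
 * rt6_route_rcv - handle a Route Information option received in a
 * Router Advertisement (RFC 4191): validate the prefix, then add,
 * refresh or delete the corresponding RTF_ROUTEINFO route according
 * to the advertised lifetime and preference.
 */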
444 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
445 struct in6_addr *gwaddr)
446 {
447 struct route_info *rinfo = (struct route_info *) opt;
448 struct in6_addr prefix_buf, *prefix;
449 unsigned int pref;
450 u32 lifetime;
451 struct rt6_info *rt;
452
453 if (len < sizeof(struct route_info)) {
454 return -EINVAL;
455 }
456
457 /* Sanity check for prefix_len and length */
458 if (rinfo->length > 3) {
459 return -EINVAL;
460 } else if (rinfo->prefix_len > 128) {
461 return -EINVAL;
462 } else if (rinfo->prefix_len > 64) {
463 if (rinfo->length < 2) {
464 return -EINVAL;
465 }
466 } else if (rinfo->prefix_len > 0) {
467 if (rinfo->length < 1) {
468 return -EINVAL;
469 }
470 }
471
472 pref = rinfo->route_pref;
473 if (pref == ICMPV6_ROUTER_PREF_INVALID)
474 pref = ICMPV6_ROUTER_PREF_MEDIUM;
475
476 lifetime = ntohl(rinfo->lifetime);
477 if (lifetime == 0xffffffff) {
478 /* infinity */
479 } else if (lifetime > 0x7fffffff/HZ) {
480 /* Avoid arithmetic overflow */
481 lifetime = 0x7fffffff/HZ - 1;
482 }
483
484 if (rinfo->length == 3)
485 prefix = (struct in6_addr *)rinfo->prefix;
486 else {
487 /* this function is safe */
488 ipv6_addr_prefix(&prefix_buf,
489 (struct in6_addr *)rinfo->prefix,
490 rinfo->prefix_len);
491 prefix = &prefix_buf;
492 }
493
494 rt = rt6_get_route_info(prefix, rinfo->prefix_len, gwaddr, dev->ifindex);
495
496 if (rt && !lifetime) {
497 ip6_del_rt(rt);
498 rt = NULL;
499 }
500
501 if (!rt && lifetime)
502 rt = rt6_add_route_info(prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
503 pref);
504 else if (rt)
505 rt->rt6i_flags = RTF_ROUTEINFO |
506 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
507
508 if (rt) {
509 if (lifetime == 0xffffffff) {
510 rt->rt6i_flags &= ~RTF_EXPIRES;
511 } else {
512 rt->rt6i_expires = jiffies + HZ * lifetime;
513 rt->rt6i_flags |= RTF_EXPIRES;
514 }
515 dst_release(&rt->u.dst);
516 }
517 return 0;
518 }
519 #endif
520
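/*
 * BACKTRACK() - if the lookup ended up on the null entry, walk back up
 * the fib6 tree (descending into source-routing subtrees where they
 * exist) until a node carrying route information is found, then jump
 * to the caller's "restart" label; reaching the tree root jumps to "out".
 */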
521 #define BACKTRACK(saddr) \
522 do { \
523 if (rt == &ip6_null_entry) { \
524 struct fib6_node *pn; \
525 while (1) { \
526 if (fn->fn_flags & RTN_TL_ROOT) \
527 goto out; \
528 pn = fn->parent; \
529 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
530 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
531 else \
532 fn = pn; \
533 if (fn->fn_flags & RTN_RTINFO) \
534 goto restart; \
535 } \
536 } \
537 } while(0)
538
539 static struct rt6_info *ip6_pol_route_lookup(struct fib6_table *table,
540 struct flowi *fl, int flags)
541 {
542 struct fib6_node *fn;
543 struct rt6_info *rt;
544
545 read_lock_bh(&table->tb6_lock);
546 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
547 restart:
548 rt = fn->leaf;
549 rt = rt6_device_match(rt, fl->oif, flags);
550 BACKTRACK(&fl->fl6_src);
551 out:
552 dst_use(&rt->u.dst, jiffies);
553 read_unlock_bh(&table->tb6_lock);
554 return rt;
555
556 }
557
558 struct rt6_info *rt6_lookup(struct in6_addr *daddr, struct in6_addr *saddr,
559 int oif, int strict)
560 {
561 struct flowi fl = {
562 .oif = oif,
563 .nl_u = {
564 .ip6_u = {
565 .daddr = *daddr,
566 },
567 },
568 };
569 struct dst_entry *dst;
570 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
571
572 if (saddr) {
573 memcpy(&fl.fl6_src, saddr, sizeof(*saddr));
574 flags |= RT6_LOOKUP_F_HAS_SADDR;
575 }
576
577 dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_lookup);
578 if (dst->error == 0)
579 return (struct rt6_info *) dst;
580
581 dst_release(dst);
582
583 return NULL;
584 }
585
586 EXPORT_SYMBOL(rt6_lookup);
587
588 /* ip6_ins_rt is called with FREE table->tb6_lock.
589 It takes a new route entry; if the addition fails for any reason, the
590 route is freed. In any case, if the caller does not hold a reference to
591 it, it may be destroyed.
592 */
593
594 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
595 {
596 int err;
597 struct fib6_table *table;
598
599 table = rt->rt6i_table;
600 write_lock_bh(&table->tb6_lock);
601 err = fib6_add(&table->tb6_root, rt, info);
602 write_unlock_bh(&table->tb6_lock);
603
604 return err;
605 }
606
607 int ip6_ins_rt(struct rt6_info *rt)
608 {
609 return __ip6_ins_rt(rt, NULL);
610 }
611
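/*
 * rt6_alloc_cow - copy-on-write clone of @ort specialised to a single
 * destination: the copy becomes a /128 RTF_CACHE host route bound to a
 * freshly looked-up neighbour entry for its gateway (or for the
 * destination itself on non-gateway routes).
 */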
612 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *daddr,
613 struct in6_addr *saddr)
614 {
615 struct rt6_info *rt;
616
617 /*
618 * Clone the route.
619 */
620
621 rt = ip6_rt_copy(ort);
622
623 if (rt) {
624 if (!(rt->rt6i_flags&RTF_GATEWAY)) {
625 if (rt->rt6i_dst.plen != 128 &&
626 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr))
627 rt->rt6i_flags |= RTF_ANYCAST;
628 ipv6_addr_copy(&rt->rt6i_gateway, daddr);
629 }
630
631 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
632 rt->rt6i_dst.plen = 128;
633 rt->rt6i_flags |= RTF_CACHE;
634 rt->u.dst.flags |= DST_HOST;
635
636 #ifdef CONFIG_IPV6_SUBTREES
637 if (rt->rt6i_src.plen && saddr) {
638 ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
639 rt->rt6i_src.plen = 128;
640 }
641 #endif
642
643 rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
644
645 }
646
647 return rt;
648 }
649
650 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *daddr)
651 {
652 struct rt6_info *rt = ip6_rt_copy(ort);
653 if (rt) {
654 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
655 rt->rt6i_dst.plen = 128;
656 rt->rt6i_flags |= RTF_CACHE;
657 rt->u.dst.flags |= DST_HOST;
658 rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
659 }
660 return rt;
661 }
662
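/*
 * ip6_pol_route - core lookup used by both the input and output paths:
 * select a route in @table and, when it is neither a cached clone nor
 * the null entry, COW it into a per-destination cache entry if it does
 * not yet have a bound nexthop.  Insertion of the clone can race with
 * other CPUs, so the lookup is retried a few times before giving up.
 */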
663 static struct rt6_info *ip6_pol_route(struct fib6_table *table, int oif,
664 struct flowi *fl, int flags)
665 {
666 struct fib6_node *fn;
667 struct rt6_info *rt, *nrt;
668 int strict = 0;
669 int attempts = 3;
670 int err;
671 int reachable = ipv6_devconf.forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
672
673 strict |= flags & RT6_LOOKUP_F_IFACE;
674
675 relookup:
676 read_lock_bh(&table->tb6_lock);
677
678 restart_2:
679 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
680
681 restart:
682 rt = rt6_select(fn, oif, strict | reachable);
683 BACKTRACK(&fl->fl6_src);
684 if (rt == &ip6_null_entry ||
685 rt->rt6i_flags & RTF_CACHE)
686 goto out;
687
688 dst_hold(&rt->u.dst);
689 read_unlock_bh(&table->tb6_lock);
690
691 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
692 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
693 else {
694 #if CLONE_OFFLINK_ROUTE
695 nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
696 #else
697 goto out2;
698 #endif
699 }
700
701 dst_release(&rt->u.dst);
702 rt = nrt ? : &ip6_null_entry;
703
704 dst_hold(&rt->u.dst);
705 if (nrt) {
706 err = ip6_ins_rt(nrt);
707 if (!err)
708 goto out2;
709 }
710
711 if (--attempts <= 0)
712 goto out2;
713
714 /*
715 * Race condition! In the gap while table->tb6_lock was
716 * released, someone could have inserted this route. Relookup.
717 */
718 dst_release(&rt->u.dst);
719 goto relookup;
720
721 out:
722 if (reachable) {
723 reachable = 0;
724 goto restart_2;
725 }
726 dst_hold(&rt->u.dst);
727 read_unlock_bh(&table->tb6_lock);
728 out2:
729 rt->u.dst.lastuse = jiffies;
730 rt->u.dst.__use++;
731
732 return rt;
733 }
734
735 static struct rt6_info *ip6_pol_route_input(struct fib6_table *table,
736 struct flowi *fl, int flags)
737 {
738 return ip6_pol_route(table, fl->iif, fl, flags);
739 }
740
741 void ip6_route_input(struct sk_buff *skb)
742 {
743 struct ipv6hdr *iph = ipv6_hdr(skb);
744 int flags = RT6_LOOKUP_F_HAS_SADDR;
745 struct flowi fl = {
746 .iif = skb->dev->ifindex,
747 .nl_u = {
748 .ip6_u = {
749 .daddr = iph->daddr,
750 .saddr = iph->saddr,
751 .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
752 },
753 },
754 .mark = skb->mark,
755 .proto = iph->nexthdr,
756 };
757
758 if (rt6_need_strict(&iph->daddr))
759 flags |= RT6_LOOKUP_F_IFACE;
760
761 skb->dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_input);
762 }
763
764 static struct rt6_info *ip6_pol_route_output(struct fib6_table *table,
765 struct flowi *fl, int flags)
766 {
767 return ip6_pol_route(table, fl->oif, fl, flags);
768 }
769
770 struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl)
771 {
772 int flags = 0;
773
774 if (rt6_need_strict(&fl->fl6_dst))
775 flags |= RT6_LOOKUP_F_IFACE;
776
777 if (!ipv6_addr_any(&fl->fl6_src))
778 flags |= RT6_LOOKUP_F_HAS_SADDR;
779
780 return fib6_rule_lookup(fl, flags, ip6_pol_route_output);
781 }
782
783 EXPORT_SYMBOL(ip6_route_output);
784
785 int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl)
786 {
787 struct rt6_info *ort = (struct rt6_info *) *dstp;
788 struct rt6_info *rt = (struct rt6_info *)
789 dst_alloc(&ip6_dst_blackhole_ops);
790 struct dst_entry *new = NULL;
791
792 if (rt) {
793 new = &rt->u.dst;
794
795 atomic_set(&new->__refcnt, 1);
796 new->__use = 1;
797 new->input = dst_discard;
798 new->output = dst_discard;
799
800 memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
801 new->dev = ort->u.dst.dev;
802 if (new->dev)
803 dev_hold(new->dev);
804 rt->rt6i_idev = ort->rt6i_idev;
805 if (rt->rt6i_idev)
806 in6_dev_hold(rt->rt6i_idev);
807 rt->rt6i_expires = 0;
808
809 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
810 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
811 rt->rt6i_metric = 0;
812
813 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
814 #ifdef CONFIG_IPV6_SUBTREES
815 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
816 #endif
817
818 dst_free(new);
819 }
820
821 dst_release(*dstp);
822 *dstp = new;
823 return (new ? 0 : -ENOMEM);
824 }
825 EXPORT_SYMBOL_GPL(ip6_dst_blackhole);
826
827 /*
828 * Destination cache support functions
829 */
830
831 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
832 {
833 struct rt6_info *rt;
834
835 rt = (struct rt6_info *) dst;
836
837 if (rt && rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
838 return dst;
839
840 return NULL;
841 }
842
843 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
844 {
845 struct rt6_info *rt = (struct rt6_info *) dst;
846
847 if (rt) {
848 if (rt->rt6i_flags & RTF_CACHE)
849 ip6_del_rt(rt);
850 else
851 dst_release(dst);
852 }
853 return NULL;
854 }
855
856 static void ip6_link_failure(struct sk_buff *skb)
857 {
858 struct rt6_info *rt;
859
860 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
861
862 rt = (struct rt6_info *) skb->dst;
863 if (rt) {
864 if (rt->rt6i_flags&RTF_CACHE) {
865 dst_set_expires(&rt->u.dst, 0);
866 rt->rt6i_flags |= RTF_EXPIRES;
867 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
868 rt->rt6i_node->fn_sernum = -1;
869 }
870 }
871
872 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
873 {
874 struct rt6_info *rt6 = (struct rt6_info*)dst;
875
876 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
877 rt6->rt6i_flags |= RTF_MODIFIED;
878 if (mtu < IPV6_MIN_MTU) {
879 mtu = IPV6_MIN_MTU;
880 dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
881 }
882 dst->metrics[RTAX_MTU-1] = mtu;
883 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
884 }
885 }
886
887 static int ipv6_get_mtu(struct net_device *dev);
888
889 static inline unsigned int ipv6_advmss(unsigned int mtu)
890 {
891 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
892
893 if (mtu < ip6_rt_min_advmss)
894 mtu = ip6_rt_min_advmss;
895
896 /*
897 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
898 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
899 * IPV6_MAXPLEN is also valid and means: "any MSS,
900 * rely only on pmtu discovery"
901 */
902 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
903 mtu = IPV6_MAXPLEN;
904 return mtu;
905 }
906
907 static struct dst_entry *ndisc_dst_gc_list;
908 static DEFINE_SPINLOCK(ndisc_lock);
909
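/*
 * ndisc_dst_alloc - allocate a throw-away dst used for neighbour
 * discovery packets.  The entry is chained on ndisc_dst_gc_list and
 * reclaimed by ndisc_dst_gc() once its refcount drops to zero.
 */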
910 struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
911 struct neighbour *neigh,
912 struct in6_addr *addr,
913 int (*output)(struct sk_buff *))
914 {
915 struct rt6_info *rt;
916 struct inet6_dev *idev = in6_dev_get(dev);
917
918 if (unlikely(idev == NULL))
919 return NULL;
920
921 rt = ip6_dst_alloc();
922 if (unlikely(rt == NULL)) {
923 in6_dev_put(idev);
924 goto out;
925 }
926
927 dev_hold(dev);
928 if (neigh)
929 neigh_hold(neigh);
930 else
931 neigh = ndisc_get_neigh(dev, addr);
932
933 rt->rt6i_dev = dev;
934 rt->rt6i_idev = idev;
935 rt->rt6i_nexthop = neigh;
936 atomic_set(&rt->u.dst.__refcnt, 1);
937 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255;
938 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
939 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
940 rt->u.dst.output = output;
941
942 #if 0 /* there's no chance to use these for ndisc */
943 rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
944 ? DST_HOST
945 : 0;
946 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
947 rt->rt6i_dst.plen = 128;
948 #endif
949
950 spin_lock_bh(&ndisc_lock);
951 rt->u.dst.next = ndisc_dst_gc_list;
952 ndisc_dst_gc_list = &rt->u.dst;
953 spin_unlock_bh(&ndisc_lock);
954
955 fib6_force_start_gc();
956
957 out:
958 return &rt->u.dst;
959 }
960
961 int ndisc_dst_gc(int *more)
962 {
963 struct dst_entry *dst, *next, **pprev;
964 int freed;
965
966 next = NULL;
967 freed = 0;
968
969 spin_lock_bh(&ndisc_lock);
970 pprev = &ndisc_dst_gc_list;
971
972 while ((dst = *pprev) != NULL) {
973 if (!atomic_read(&dst->__refcnt)) {
974 *pprev = dst->next;
975 dst_free(dst);
976 freed++;
977 } else {
978 pprev = &dst->next;
979 (*more)++;
980 }
981 }
982
983 spin_unlock_bh(&ndisc_lock);
984
985 return freed;
986 }
987
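/*
 * ip6_dst_gc - dst_ops garbage collector.  Runs are rate-limited by
 * ip6_rt_gc_min_interval unless the number of entries has exceeded
 * ip6_rt_max_size; the expiry horizon passed to fib6_run_gc() adapts
 * between runs (decaying via ip6_rt_gc_elasticity).
 */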
988 static int ip6_dst_gc(void)
989 {
990 static unsigned expire = 30*HZ;
991 static unsigned long last_gc;
992 unsigned long now = jiffies;
993
994 if (time_after(last_gc + ip6_rt_gc_min_interval, now) &&
995 atomic_read(&ip6_dst_ops.entries) <= ip6_rt_max_size)
996 goto out;
997
998 expire++;
999 fib6_run_gc(expire);
1000 last_gc = now;
1001 if (atomic_read(&ip6_dst_ops.entries) < ip6_dst_ops.gc_thresh)
1002 expire = ip6_rt_gc_timeout>>1;
1003
1004 out:
1005 expire -= expire>>ip6_rt_gc_elasticity;
1006 return (atomic_read(&ip6_dst_ops.entries) > ip6_rt_max_size);
1007 }
1008
1009 /* Clean the host part of a prefix. Not necessary in a radix tree,
1010 but it results in cleaner routing tables.
1011
1012 Remove it only when everything works!
1013 */
1014
1015 static int ipv6_get_mtu(struct net_device *dev)
1016 {
1017 int mtu = IPV6_MIN_MTU;
1018 struct inet6_dev *idev;
1019
1020 idev = in6_dev_get(dev);
1021 if (idev) {
1022 mtu = idev->cnf.mtu6;
1023 in6_dev_put(idev);
1024 }
1025 return mtu;
1026 }
1027
1028 int ipv6_get_hoplimit(struct net_device *dev)
1029 {
1030 int hoplimit = ipv6_devconf.hop_limit;
1031 struct inet6_dev *idev;
1032
1033 idev = in6_dev_get(dev);
1034 if (idev) {
1035 hoplimit = idev->cnf.hop_limit;
1036 in6_dev_put(idev);
1037 }
1038 return hoplimit;
1039 }
1040
1041 /*
1042 * Add a route described by a fib6_config to the FIB.
1043 */
1044
1045 int ip6_route_add(struct fib6_config *cfg)
1046 {
1047 int err;
1048 struct rt6_info *rt = NULL;
1049 struct net_device *dev = NULL;
1050 struct inet6_dev *idev = NULL;
1051 struct fib6_table *table;
1052 int addr_type;
1053
1054 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1055 return -EINVAL;
1056 #ifndef CONFIG_IPV6_SUBTREES
1057 if (cfg->fc_src_len)
1058 return -EINVAL;
1059 #endif
1060 if (cfg->fc_ifindex) {
1061 err = -ENODEV;
1062 dev = dev_get_by_index(&init_net, cfg->fc_ifindex);
1063 if (!dev)
1064 goto out;
1065 idev = in6_dev_get(dev);
1066 if (!idev)
1067 goto out;
1068 }
1069
1070 if (cfg->fc_metric == 0)
1071 cfg->fc_metric = IP6_RT_PRIO_USER;
1072
1073 table = fib6_new_table(cfg->fc_table);
1074 if (table == NULL) {
1075 err = -ENOBUFS;
1076 goto out;
1077 }
1078
1079 rt = ip6_dst_alloc();
1080
1081 if (rt == NULL) {
1082 err = -ENOMEM;
1083 goto out;
1084 }
1085
1086 rt->u.dst.obsolete = -1;
1087 rt->rt6i_expires = jiffies + clock_t_to_jiffies(cfg->fc_expires);
1088
1089 if (cfg->fc_protocol == RTPROT_UNSPEC)
1090 cfg->fc_protocol = RTPROT_BOOT;
1091 rt->rt6i_protocol = cfg->fc_protocol;
1092
1093 addr_type = ipv6_addr_type(&cfg->fc_dst);
1094
1095 if (addr_type & IPV6_ADDR_MULTICAST)
1096 rt->u.dst.input = ip6_mc_input;
1097 else
1098 rt->u.dst.input = ip6_forward;
1099
1100 rt->u.dst.output = ip6_output;
1101
1102 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1103 rt->rt6i_dst.plen = cfg->fc_dst_len;
1104 if (rt->rt6i_dst.plen == 128)
1105 rt->u.dst.flags = DST_HOST;
1106
1107 #ifdef CONFIG_IPV6_SUBTREES
1108 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1109 rt->rt6i_src.plen = cfg->fc_src_len;
1110 #endif
1111
1112 rt->rt6i_metric = cfg->fc_metric;
1113
1114 /* We cannot add true routes via loopback here;
1115 they would result in kernel looping. Promote them to reject routes.
1116 */
1117 if ((cfg->fc_flags & RTF_REJECT) ||
1118 (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) {
1119 /* hold loopback dev/idev if we haven't done so. */
1120 if (dev != init_net.loopback_dev) {
1121 if (dev) {
1122 dev_put(dev);
1123 in6_dev_put(idev);
1124 }
1125 dev = init_net.loopback_dev;
1126 dev_hold(dev);
1127 idev = in6_dev_get(dev);
1128 if (!idev) {
1129 err = -ENODEV;
1130 goto out;
1131 }
1132 }
1133 rt->u.dst.output = ip6_pkt_discard_out;
1134 rt->u.dst.input = ip6_pkt_discard;
1135 rt->u.dst.error = -ENETUNREACH;
1136 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1137 goto install_route;
1138 }
1139
1140 if (cfg->fc_flags & RTF_GATEWAY) {
1141 struct in6_addr *gw_addr;
1142 int gwa_type;
1143
1144 gw_addr = &cfg->fc_gateway;
1145 ipv6_addr_copy(&rt->rt6i_gateway, gw_addr);
1146 gwa_type = ipv6_addr_type(gw_addr);
1147
1148 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1149 struct rt6_info *grt;
1150
1151 /* IPv6 strictly forbids using non-link-local
1152 addresses as the nexthop address.
1153 Otherwise, the router will not be able to send redirects.
1154 That is very good, but in some (rare!) circumstances
1155 (SIT, PtP, NBMA NOARP links) it is handy to allow
1156 some exceptions. --ANK
1157 */
1158 err = -EINVAL;
1159 if (!(gwa_type&IPV6_ADDR_UNICAST))
1160 goto out;
1161
1162 grt = rt6_lookup(gw_addr, NULL, cfg->fc_ifindex, 1);
1163
1164 err = -EHOSTUNREACH;
1165 if (grt == NULL)
1166 goto out;
1167 if (dev) {
1168 if (dev != grt->rt6i_dev) {
1169 dst_release(&grt->u.dst);
1170 goto out;
1171 }
1172 } else {
1173 dev = grt->rt6i_dev;
1174 idev = grt->rt6i_idev;
1175 dev_hold(dev);
1176 in6_dev_hold(grt->rt6i_idev);
1177 }
1178 if (!(grt->rt6i_flags&RTF_GATEWAY))
1179 err = 0;
1180 dst_release(&grt->u.dst);
1181
1182 if (err)
1183 goto out;
1184 }
1185 err = -EINVAL;
1186 if (dev == NULL || (dev->flags&IFF_LOOPBACK))
1187 goto out;
1188 }
1189
1190 err = -ENODEV;
1191 if (dev == NULL)
1192 goto out;
1193
1194 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
1195 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
1196 if (IS_ERR(rt->rt6i_nexthop)) {
1197 err = PTR_ERR(rt->rt6i_nexthop);
1198 rt->rt6i_nexthop = NULL;
1199 goto out;
1200 }
1201 }
1202
1203 rt->rt6i_flags = cfg->fc_flags;
1204
1205 install_route:
1206 if (cfg->fc_mx) {
1207 struct nlattr *nla;
1208 int remaining;
1209
1210 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1211 int type = nla_type(nla);
1212
1213 if (type) {
1214 if (type > RTAX_MAX) {
1215 err = -EINVAL;
1216 goto out;
1217 }
1218
1219 rt->u.dst.metrics[type - 1] = nla_get_u32(nla);
1220 }
1221 }
1222 }
1223
1224 if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
1225 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1226 if (!rt->u.dst.metrics[RTAX_MTU-1])
1227 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
1228 if (!rt->u.dst.metrics[RTAX_ADVMSS-1])
1229 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
1230 rt->u.dst.dev = dev;
1231 rt->rt6i_idev = idev;
1232 rt->rt6i_table = table;
1233 return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1234
1235 out:
1236 if (dev)
1237 dev_put(dev);
1238 if (idev)
1239 in6_dev_put(idev);
1240 if (rt)
1241 dst_free(&rt->u.dst);
1242 return err;
1243 }
1244
1245 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1246 {
1247 int err;
1248 struct fib6_table *table;
1249
1250 if (rt == &ip6_null_entry)
1251 return -ENOENT;
1252
1253 table = rt->rt6i_table;
1254 write_lock_bh(&table->tb6_lock);
1255
1256 err = fib6_del(rt, info);
1257 dst_release(&rt->u.dst);
1258
1259 write_unlock_bh(&table->tb6_lock);
1260
1261 return err;
1262 }
1263
1264 int ip6_del_rt(struct rt6_info *rt)
1265 {
1266 return __ip6_del_rt(rt, NULL);
1267 }
1268
1269 static int ip6_route_del(struct fib6_config *cfg)
1270 {
1271 struct fib6_table *table;
1272 struct fib6_node *fn;
1273 struct rt6_info *rt;
1274 int err = -ESRCH;
1275
1276 table = fib6_get_table(cfg->fc_table);
1277 if (table == NULL)
1278 return err;
1279
1280 read_lock_bh(&table->tb6_lock);
1281
1282 fn = fib6_locate(&table->tb6_root,
1283 &cfg->fc_dst, cfg->fc_dst_len,
1284 &cfg->fc_src, cfg->fc_src_len);
1285
1286 if (fn) {
1287 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1288 if (cfg->fc_ifindex &&
1289 (rt->rt6i_dev == NULL ||
1290 rt->rt6i_dev->ifindex != cfg->fc_ifindex))
1291 continue;
1292 if (cfg->fc_flags & RTF_GATEWAY &&
1293 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1294 continue;
1295 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1296 continue;
1297 dst_hold(&rt->u.dst);
1298 read_unlock_bh(&table->tb6_lock);
1299
1300 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1301 }
1302 }
1303 read_unlock_bh(&table->tb6_lock);
1304
1305 return err;
1306 }
1307
1308 /*
1309 * Handle redirects
1310 */
1311 struct ip6rd_flowi {
1312 struct flowi fl;
1313 struct in6_addr gateway;
1314 };
1315
1316 static struct rt6_info *__ip6_route_redirect(struct fib6_table *table,
1317 struct flowi *fl,
1318 int flags)
1319 {
1320 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl;
1321 struct rt6_info *rt;
1322 struct fib6_node *fn;
1323
1324 /*
1325 * Get the "current" route for this destination and
1326 * check if the redirect has come from an appropriate router.
1327 *
1328 * RFC 2461 specifies that redirects should only be
1329 * accepted if they come from the nexthop to the target.
1330 * Due to the way the routes are chosen, this notion
1331 * is a bit fuzzy and one might need to check all possible
1332 * routes.
1333 */
1334
1335 read_lock_bh(&table->tb6_lock);
1336 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
1337 restart:
1338 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1339 /*
1340 * Current route is on-link; redirect is always invalid.
1341 *
1342 * It seems the previous statement is not true. The sender could
1343 * be a node which regards us as on-link (e.g. proxy ndisc),
1344 * but then the router serving it might decide that we should
1345 * know the truth 8)8) --ANK (980726).
1346 */
1347 if (rt6_check_expired(rt))
1348 continue;
1349 if (!(rt->rt6i_flags & RTF_GATEWAY))
1350 continue;
1351 if (fl->oif != rt->rt6i_dev->ifindex)
1352 continue;
1353 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1354 continue;
1355 break;
1356 }
1357
1358 if (!rt)
1359 rt = &ip6_null_entry;
1360 BACKTRACK(&fl->fl6_src);
1361 out:
1362 dst_hold(&rt->u.dst);
1363
1364 read_unlock_bh(&table->tb6_lock);
1365
1366 return rt;
1367 };
1368
1369 static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1370 struct in6_addr *src,
1371 struct in6_addr *gateway,
1372 struct net_device *dev)
1373 {
1374 int flags = RT6_LOOKUP_F_HAS_SADDR;
1375 struct ip6rd_flowi rdfl = {
1376 .fl = {
1377 .oif = dev->ifindex,
1378 .nl_u = {
1379 .ip6_u = {
1380 .daddr = *dest,
1381 .saddr = *src,
1382 },
1383 },
1384 },
1385 .gateway = *gateway,
1386 };
1387
1388 if (rt6_need_strict(dest))
1389 flags |= RT6_LOOKUP_F_IFACE;
1390
1391 return (struct rt6_info *)fib6_rule_lookup((struct flowi *)&rdfl, flags, __ip6_route_redirect);
1392 }
1393
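/*
 * rt6_redirect - process a validated ICMPv6 redirect: confirm that the
 * advertising router is the current nexthop, update the neighbour cache
 * with the new link-layer address, and install an RTF_CACHE host route
 * through the new gateway (replacing a previously cached route if any).
 */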
1394 void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
1395 struct in6_addr *saddr,
1396 struct neighbour *neigh, u8 *lladdr, int on_link)
1397 {
1398 struct rt6_info *rt, *nrt = NULL;
1399 struct netevent_redirect netevent;
1400
1401 rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
1402
1403 if (rt == &ip6_null_entry) {
1404 if (net_ratelimit())
1405 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
1406 "for redirect target\n");
1407 goto out;
1408 }
1409
1410 /*
1411 * We have finally decided to accept it.
1412 */
1413
1414 neigh_update(neigh, lladdr, NUD_STALE,
1415 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1416 NEIGH_UPDATE_F_OVERRIDE|
1417 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1418 NEIGH_UPDATE_F_ISROUTER))
1419 );
1420
1421 /*
1422 * Redirect received -> path was valid.
1423 * Look, redirects are sent only in response to data packets,
1424 * so this nexthop apparently is reachable. --ANK
1425 */
1426 dst_confirm(&rt->u.dst);
1427
1428 /* Duplicate redirect: silently ignore. */
1429 if (neigh == rt->u.dst.neighbour)
1430 goto out;
1431
1432 nrt = ip6_rt_copy(rt);
1433 if (nrt == NULL)
1434 goto out;
1435
1436 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1437 if (on_link)
1438 nrt->rt6i_flags &= ~RTF_GATEWAY;
1439
1440 ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
1441 nrt->rt6i_dst.plen = 128;
1442 nrt->u.dst.flags |= DST_HOST;
1443
1444 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
1445 nrt->rt6i_nexthop = neigh_clone(neigh);
1446 /* Reset pmtu, it may be better */
1447 nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
1448 nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&nrt->u.dst));
1449
1450 if (ip6_ins_rt(nrt))
1451 goto out;
1452
1453 netevent.old = &rt->u.dst;
1454 netevent.new = &nrt->u.dst;
1455 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1456
1457 if (rt->rt6i_flags&RTF_CACHE) {
1458 ip6_del_rt(rt);
1459 return;
1460 }
1461
1462 out:
1463 dst_release(&rt->u.dst);
1464 return;
1465 }
1466
1467 /*
1468 * Handle ICMP "packet too big" messages
1469 * i.e. Path MTU discovery
1470 */
1471
1472 void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1473 struct net_device *dev, u32 pmtu)
1474 {
1475 struct rt6_info *rt, *nrt;
1476 int allfrag = 0;
1477
1478 rt = rt6_lookup(daddr, saddr, dev->ifindex, 0);
1479 if (rt == NULL)
1480 return;
1481
1482 if (pmtu >= dst_mtu(&rt->u.dst))
1483 goto out;
1484
1485 if (pmtu < IPV6_MIN_MTU) {
1486 /*
1487 * According to RFC 2460, the PMTU is set to the IPv6 Minimum Link
1488 * MTU (1280) and a fragment header should always be included
1489 * once a node receives a Too Big message reporting a PMTU
1490 * less than the IPv6 Minimum Link MTU.
1491 */
1492 pmtu = IPV6_MIN_MTU;
1493 allfrag = 1;
1494 }
1495
1496 /* New MTU received -> path was valid.
1497 Too Big messages are sent only in response to data packets,
1498 so this nexthop apparently is reachable. --ANK
1499 */
1500 dst_confirm(&rt->u.dst);
1501
1502 /* Host route. If it is static, it would be better
1503 not to override it but to add a new one, so that
1504 when the cache entry expires the old PMTU
1505 is restored automatically.
1506 */
1507 if (rt->rt6i_flags & RTF_CACHE) {
1508 rt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1509 if (allfrag)
1510 rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1511 dst_set_expires(&rt->u.dst, ip6_rt_mtu_expires);
1512 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
1513 goto out;
1514 }
1515
1516 /* Network route.
1517 Two cases are possible:
1518 1. It is a connected route. Action: COW it.
1519 2. It is a gatewayed or NONEXTHOP route. Action: clone it.
1520 */
1521 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
1522 nrt = rt6_alloc_cow(rt, daddr, saddr);
1523 else
1524 nrt = rt6_alloc_clone(rt, daddr);
1525
1526 if (nrt) {
1527 nrt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1528 if (allfrag)
1529 nrt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1530
1531 /* According to RFC 1981, a PMTU increase should not be probed
1532 * for within 5 minutes; the recommended timer is 10 minutes.
1533 * Here the route expiration time is set to ip6_rt_mtu_expires,
1534 * which is 10 minutes. After 10 minutes the decreased PMTU expires
1535 * and probing for a PMTU increase happens automatically.
1536 */
1537 dst_set_expires(&nrt->u.dst, ip6_rt_mtu_expires);
1538 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
1539
1540 ip6_ins_rt(nrt);
1541 }
1542 out:
1543 dst_release(&rt->u.dst);
1544 }
1545
1546 /*
1547 * Misc support functions
1548 */
1549
1550 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1551 {
1552 struct rt6_info *rt = ip6_dst_alloc();
1553
1554 if (rt) {
1555 rt->u.dst.input = ort->u.dst.input;
1556 rt->u.dst.output = ort->u.dst.output;
1557
1558 memcpy(rt->u.dst.metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
1559 rt->u.dst.error = ort->u.dst.error;
1560 rt->u.dst.dev = ort->u.dst.dev;
1561 if (rt->u.dst.dev)
1562 dev_hold(rt->u.dst.dev);
1563 rt->rt6i_idev = ort->rt6i_idev;
1564 if (rt->rt6i_idev)
1565 in6_dev_hold(rt->rt6i_idev);
1566 rt->u.dst.lastuse = jiffies;
1567 rt->rt6i_expires = 0;
1568
1569 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
1570 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
1571 rt->rt6i_metric = 0;
1572
1573 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1574 #ifdef CONFIG_IPV6_SUBTREES
1575 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1576 #endif
1577 rt->rt6i_table = ort->rt6i_table;
1578 }
1579 return rt;
1580 }
1581
1582 #ifdef CONFIG_IPV6_ROUTE_INFO
1583 static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixlen,
1584 struct in6_addr *gwaddr, int ifindex)
1585 {
1586 struct fib6_node *fn;
1587 struct rt6_info *rt = NULL;
1588 struct fib6_table *table;
1589
1590 table = fib6_get_table(RT6_TABLE_INFO);
1591 if (table == NULL)
1592 return NULL;
1593
1594 write_lock_bh(&table->tb6_lock);
1595 fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
1596 if (!fn)
1597 goto out;
1598
1599 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1600 if (rt->rt6i_dev->ifindex != ifindex)
1601 continue;
1602 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1603 continue;
1604 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1605 continue;
1606 dst_hold(&rt->u.dst);
1607 break;
1608 }
1609 out:
1610 write_unlock_bh(&table->tb6_lock);
1611 return rt;
1612 }
1613
1614 static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixlen,
1615 struct in6_addr *gwaddr, int ifindex,
1616 unsigned pref)
1617 {
1618 struct fib6_config cfg = {
1619 .fc_table = RT6_TABLE_INFO,
1620 .fc_metric = 1024,
1621 .fc_ifindex = ifindex,
1622 .fc_dst_len = prefixlen,
1623 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1624 RTF_UP | RTF_PREF(pref),
1625 };
1626
1627 ipv6_addr_copy(&cfg.fc_dst, prefix);
1628 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1629
1630 /* We should treat it as a default route if prefix length is 0. */
1631 if (!prefixlen)
1632 cfg.fc_flags |= RTF_DEFAULT;
1633
1634 ip6_route_add(&cfg);
1635
1636 return rt6_get_route_info(prefix, prefixlen, gwaddr, ifindex);
1637 }
1638 #endif
1639
1640 struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *dev)
1641 {
1642 struct rt6_info *rt;
1643 struct fib6_table *table;
1644
1645 table = fib6_get_table(RT6_TABLE_DFLT);
1646 if (table == NULL)
1647 return NULL;
1648
1649 write_lock_bh(&table->tb6_lock);
1650 for (rt = table->tb6_root.leaf; rt; rt=rt->u.dst.rt6_next) {
1651 if (dev == rt->rt6i_dev &&
1652 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1653 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1654 break;
1655 }
1656 if (rt)
1657 dst_hold(&rt->u.dst);
1658 write_unlock_bh(&table->tb6_lock);
1659 return rt;
1660 }
1661
1662 struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
1663 struct net_device *dev,
1664 unsigned int pref)
1665 {
1666 struct fib6_config cfg = {
1667 .fc_table = RT6_TABLE_DFLT,
1668 .fc_metric = 1024,
1669 .fc_ifindex = dev->ifindex,
1670 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1671 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1672 };
1673
1674 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1675
1676 ip6_route_add(&cfg);
1677
1678 return rt6_get_dflt_router(gwaddr, dev);
1679 }
1680
1681 void rt6_purge_dflt_routers(void)
1682 {
1683 struct rt6_info *rt;
1684 struct fib6_table *table;
1685
1686 /* NOTE: Keep consistent with rt6_get_dflt_router */
1687 table = fib6_get_table(RT6_TABLE_DFLT);
1688 if (table == NULL)
1689 return;
1690
1691 restart:
1692 read_lock_bh(&table->tb6_lock);
1693 for (rt = table->tb6_root.leaf; rt; rt = rt->u.dst.rt6_next) {
1694 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
1695 dst_hold(&rt->u.dst);
1696 read_unlock_bh(&table->tb6_lock);
1697 ip6_del_rt(rt);
1698 goto restart;
1699 }
1700 }
1701 read_unlock_bh(&table->tb6_lock);
1702 }
1703
1704 static void rtmsg_to_fib6_config(struct in6_rtmsg *rtmsg,
1705 struct fib6_config *cfg)
1706 {
1707 memset(cfg, 0, sizeof(*cfg));
1708
1709 cfg->fc_table = RT6_TABLE_MAIN;
1710 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
1711 cfg->fc_metric = rtmsg->rtmsg_metric;
1712 cfg->fc_expires = rtmsg->rtmsg_info;
1713 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
1714 cfg->fc_src_len = rtmsg->rtmsg_src_len;
1715 cfg->fc_flags = rtmsg->rtmsg_flags;
1716
1717 ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
1718 ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
1719 ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
1720 }
1721
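/*
 * ipv6_route_ioctl - legacy SIOCADDRT/SIOCDELRT entry point (the
 * interface historically used by route(8)); the in6_rtmsg supplied by
 * userspace is converted to a fib6_config and handed to
 * ip6_route_add()/ip6_route_del() under the RTNL.  A minimal userspace
 * sketch (illustrative only; CAP_NET_ADMIN required, error handling and
 * the destination prefix omitted):
 *
 *     struct in6_rtmsg rtm = {
 *             .rtmsg_dst_len = 64,
 *             .rtmsg_metric  = 1,
 *             .rtmsg_flags   = RTF_UP,
 *             .rtmsg_ifindex = ifindex,
 *     };
 *     ioctl(sock_fd, SIOCADDRT, &rtm);    // sock_fd: an AF_INET6 socket
 */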
1722 int ipv6_route_ioctl(unsigned int cmd, void __user *arg)
1723 {
1724 struct fib6_config cfg;
1725 struct in6_rtmsg rtmsg;
1726 int err;
1727
1728 switch(cmd) {
1729 case SIOCADDRT: /* Add a route */
1730 case SIOCDELRT: /* Delete a route */
1731 if (!capable(CAP_NET_ADMIN))
1732 return -EPERM;
1733 err = copy_from_user(&rtmsg, arg,
1734 sizeof(struct in6_rtmsg));
1735 if (err)
1736 return -EFAULT;
1737
1738 rtmsg_to_fib6_config(&rtmsg, &cfg);
1739
1740 rtnl_lock();
1741 switch (cmd) {
1742 case SIOCADDRT:
1743 err = ip6_route_add(&cfg);
1744 break;
1745 case SIOCDELRT:
1746 err = ip6_route_del(&cfg);
1747 break;
1748 default:
1749 err = -EINVAL;
1750 }
1751 rtnl_unlock();
1752
1753 return err;
1754 }
1755
1756 return -EINVAL;
1757 }
1758
1759 /*
1760 * Drop the packet on the floor
1761 */
1762
1763 static inline int ip6_pkt_drop(struct sk_buff *skb, int code,
1764 int ipstats_mib_noroutes)
1765 {
1766 int type;
1767 switch (ipstats_mib_noroutes) {
1768 case IPSTATS_MIB_INNOROUTES:
1769 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
1770 if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED) {
1771 IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INADDRERRORS);
1772 break;
1773 }
1774 /* FALLTHROUGH */
1775 case IPSTATS_MIB_OUTNOROUTES:
1776 IP6_INC_STATS(ip6_dst_idev(skb->dst), ipstats_mib_noroutes);
1777 break;
1778 }
1779 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0, skb->dev);
1780 kfree_skb(skb);
1781 return 0;
1782 }
1783
1784 static int ip6_pkt_discard(struct sk_buff *skb)
1785 {
1786 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
1787 }
1788
1789 static int ip6_pkt_discard_out(struct sk_buff *skb)
1790 {
1791 skb->dev = skb->dst->dev;
1792 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
1793 }
1794
1795 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
1796
1797 static int ip6_pkt_prohibit(struct sk_buff *skb)
1798 {
1799 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
1800 }
1801
1802 static int ip6_pkt_prohibit_out(struct sk_buff *skb)
1803 {
1804 skb->dev = skb->dst->dev;
1805 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
1806 }
1807
1808 #endif
1809
1810 /*
1811 * Allocate a dst for local (unicast / anycast) address.
1812 */
1813
1814 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1815 const struct in6_addr *addr,
1816 int anycast)
1817 {
1818 struct rt6_info *rt = ip6_dst_alloc();
1819
1820 if (rt == NULL)
1821 return ERR_PTR(-ENOMEM);
1822
1823 dev_hold(init_net.loopback_dev);
1824 in6_dev_hold(idev);
1825
1826 rt->u.dst.flags = DST_HOST;
1827 rt->u.dst.input = ip6_input;
1828 rt->u.dst.output = ip6_output;
1829 rt->rt6i_dev = init_net.loopback_dev;
1830 rt->rt6i_idev = idev;
1831 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
1832 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
1833 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1834 rt->u.dst.obsolete = -1;
1835
1836 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
1837 if (anycast)
1838 rt->rt6i_flags |= RTF_ANYCAST;
1839 else
1840 rt->rt6i_flags |= RTF_LOCAL;
1841 rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
1842 if (rt->rt6i_nexthop == NULL) {
1843 dst_free(&rt->u.dst);
1844 return ERR_PTR(-ENOMEM);
1845 }
1846
1847 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1848 rt->rt6i_dst.plen = 128;
1849 rt->rt6i_table = fib6_get_table(RT6_TABLE_LOCAL);
1850
1851 atomic_set(&rt->u.dst.__refcnt, 1);
1852
1853 return rt;
1854 }
1855
1856 static int fib6_ifdown(struct rt6_info *rt, void *arg)
1857 {
1858 if (((void*)rt->rt6i_dev == arg || arg == NULL) &&
1859 rt != &ip6_null_entry) {
1860 RT6_TRACE("deleted by ifdown %p\n", rt);
1861 return -1;
1862 }
1863 return 0;
1864 }
1865
1866 void rt6_ifdown(struct net_device *dev)
1867 {
1868 fib6_clean_all(fib6_ifdown, 0, dev);
1869 }
1870
1871 struct rt6_mtu_change_arg
1872 {
1873 struct net_device *dev;
1874 unsigned mtu;
1875 };
1876
1877 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
1878 {
1879 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
1880 struct inet6_dev *idev;
1881
1882 /* In IPv6, PMTU discovery is not optional,
1883 so the RTAX_MTU lock cannot disable it.
1884 We still use this lock to block changes
1885 caused by addrconf/ndisc.
1886 */
1887
1888 idev = __in6_dev_get(arg->dev);
1889 if (idev == NULL)
1890 return 0;
1891
1892 /* For an administrative MTU increase, there is no way to discover
1893 an IPv6 PMTU increase, so the PMTU increase must be applied here.
1894 Since RFC 1981 doesn't cover administrative MTU increases,
1895 updating the PMTU on an increase is a MUST (e.g. jumbo frames).
1896 */
1897 /*
1898 If the new MTU is less than the route PMTU, the new MTU will be the
1899 lowest MTU in the path; update the route PMTU to reflect the
1900 decrease. If the new MTU is greater than the route PMTU, and the
1901 old MTU was the lowest MTU in the path, update the route PMTU
1902 to reflect the increase. In that case, if another node's MTU
1903 is now the lowest in the path, a Too Big message will trigger
1904 PMTU discovery again.
1905 */
1906 if (rt->rt6i_dev == arg->dev &&
1907 !dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
1908 (dst_mtu(&rt->u.dst) > arg->mtu ||
1909 (dst_mtu(&rt->u.dst) < arg->mtu &&
1910 dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) {
1911 rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
1912 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(arg->mtu);
1913 }
1914 return 0;
1915 }
1916
1917 void rt6_mtu_change(struct net_device *dev, unsigned mtu)
1918 {
1919 struct rt6_mtu_change_arg arg = {
1920 .dev = dev,
1921 .mtu = mtu,
1922 };
1923
1924 fib6_clean_all(rt6_mtu_change_route, 0, &arg);
1925 }
1926
1927 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
1928 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
1929 [RTA_OIF] = { .type = NLA_U32 },
1930 [RTA_IIF] = { .type = NLA_U32 },
1931 [RTA_PRIORITY] = { .type = NLA_U32 },
1932 [RTA_METRICS] = { .type = NLA_NESTED },
1933 };
1934
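/*
 * rtm_to_fib6_config - translate an RTM_NEWROUTE/RTM_DELROUTE netlink
 * message into the fib6_config structure consumed by ip6_route_add()
 * and ip6_route_del(), validating attributes against rtm_ipv6_policy.
 */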
1935 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
1936 struct fib6_config *cfg)
1937 {
1938 struct rtmsg *rtm;
1939 struct nlattr *tb[RTA_MAX+1];
1940 int err;
1941
1942 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
1943 if (err < 0)
1944 goto errout;
1945
1946 err = -EINVAL;
1947 rtm = nlmsg_data(nlh);
1948 memset(cfg, 0, sizeof(*cfg));
1949
1950 cfg->fc_table = rtm->rtm_table;
1951 cfg->fc_dst_len = rtm->rtm_dst_len;
1952 cfg->fc_src_len = rtm->rtm_src_len;
1953 cfg->fc_flags = RTF_UP;
1954 cfg->fc_protocol = rtm->rtm_protocol;
1955
1956 if (rtm->rtm_type == RTN_UNREACHABLE)
1957 cfg->fc_flags |= RTF_REJECT;
1958
1959 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
1960 cfg->fc_nlinfo.nlh = nlh;
1961
1962 if (tb[RTA_GATEWAY]) {
1963 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
1964 cfg->fc_flags |= RTF_GATEWAY;
1965 }
1966
1967 if (tb[RTA_DST]) {
1968 int plen = (rtm->rtm_dst_len + 7) >> 3;
1969
1970 if (nla_len(tb[RTA_DST]) < plen)
1971 goto errout;
1972
1973 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
1974 }
1975
1976 if (tb[RTA_SRC]) {
1977 int plen = (rtm->rtm_src_len + 7) >> 3;
1978
1979 if (nla_len(tb[RTA_SRC]) < plen)
1980 goto errout;
1981
1982 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
1983 }
1984
1985 if (tb[RTA_OIF])
1986 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
1987
1988 if (tb[RTA_PRIORITY])
1989 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
1990
1991 if (tb[RTA_METRICS]) {
1992 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
1993 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
1994 }
1995
1996 if (tb[RTA_TABLE])
1997 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
1998
1999 err = 0;
2000 errout:
2001 return err;
2002 }
2003
2004 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2005 {
2006 struct net *net = skb->sk->sk_net;
2007 struct fib6_config cfg;
2008 int err;
2009
2010 if (net != &init_net)
2011 return -EINVAL;
2012
2013 err = rtm_to_fib6_config(skb, nlh, &cfg);
2014 if (err < 0)
2015 return err;
2016
2017 return ip6_route_del(&cfg);
2018 }
2019
2020 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2021 {
2022 struct net *net = skb->sk->sk_net;
2023 struct fib6_config cfg;
2024 int err;
2025
2026 if (net != &init_net)
2027 return -EINVAL;
2028
2029 err = rtm_to_fib6_config(skb, nlh, &cfg);
2030 if (err < 0)
2031 return err;
2032
2033 return ip6_route_add(&cfg);
2034 }
2035
2036 static inline size_t rt6_nlmsg_size(void)
2037 {
2038 return NLMSG_ALIGN(sizeof(struct rtmsg))
2039 + nla_total_size(16) /* RTA_SRC */
2040 + nla_total_size(16) /* RTA_DST */
2041 + nla_total_size(16) /* RTA_GATEWAY */
2042 + nla_total_size(16) /* RTA_PREFSRC */
2043 + nla_total_size(4) /* RTA_TABLE */
2044 + nla_total_size(4) /* RTA_IIF */
2045 + nla_total_size(4) /* RTA_OIF */
2046 + nla_total_size(4) /* RTA_PRIORITY */
2047 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2048 + nla_total_size(sizeof(struct rta_cacheinfo));
2049 }
2050
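/*
 * rt6_fill_node - dump one rt6_info as an RTM_NEWROUTE message into
 * @skb.  rt6_nlmsg_size() above must stay in sync with the attributes
 * emitted here, otherwise notifications fail with -EMSGSIZE.
 */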
2051 static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
2052 struct in6_addr *dst, struct in6_addr *src,
2053 int iif, int type, u32 pid, u32 seq,
2054 int prefix, unsigned int flags)
2055 {
2056 struct rtmsg *rtm;
2057 struct nlmsghdr *nlh;
2058 long expires;
2059 u32 table;
2060
2061 if (prefix) { /* user wants prefix routes only */
2062 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2063 /* success since this is not a prefix route */
2064 return 1;
2065 }
2066 }
2067
2068 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
2069 if (nlh == NULL)
2070 return -EMSGSIZE;
2071
2072 rtm = nlmsg_data(nlh);
2073 rtm->rtm_family = AF_INET6;
2074 rtm->rtm_dst_len = rt->rt6i_dst.plen;
2075 rtm->rtm_src_len = rt->rt6i_src.plen;
2076 rtm->rtm_tos = 0;
2077 if (rt->rt6i_table)
2078 table = rt->rt6i_table->tb6_id;
2079 else
2080 table = RT6_TABLE_UNSPEC;
2081 rtm->rtm_table = table;
2082 NLA_PUT_U32(skb, RTA_TABLE, table);
2083 if (rt->rt6i_flags&RTF_REJECT)
2084 rtm->rtm_type = RTN_UNREACHABLE;
2085 else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
2086 rtm->rtm_type = RTN_LOCAL;
2087 else
2088 rtm->rtm_type = RTN_UNICAST;
2089 rtm->rtm_flags = 0;
2090 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2091 rtm->rtm_protocol = rt->rt6i_protocol;
2092 if (rt->rt6i_flags&RTF_DYNAMIC)
2093 rtm->rtm_protocol = RTPROT_REDIRECT;
2094 else if (rt->rt6i_flags & RTF_ADDRCONF)
2095 rtm->rtm_protocol = RTPROT_KERNEL;
2096 else if (rt->rt6i_flags&RTF_DEFAULT)
2097 rtm->rtm_protocol = RTPROT_RA;
2098
2099 if (rt->rt6i_flags&RTF_CACHE)
2100 rtm->rtm_flags |= RTM_F_CLONED;
2101
2102 if (dst) {
2103 NLA_PUT(skb, RTA_DST, 16, dst);
2104 rtm->rtm_dst_len = 128;
2105 } else if (rtm->rtm_dst_len)
2106 NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
2107 #ifdef CONFIG_IPV6_SUBTREES
2108 if (src) {
2109 NLA_PUT(skb, RTA_SRC, 16, src);
2110 rtm->rtm_src_len = 128;
2111 } else if (rtm->rtm_src_len)
2112 NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
2113 #endif
2114 if (iif)
2115 NLA_PUT_U32(skb, RTA_IIF, iif);
2116 else if (dst) {
2117 struct in6_addr saddr_buf;
2118 if (ipv6_get_saddr(&rt->u.dst, dst, &saddr_buf) == 0)
2119 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2120 }
2121
2122 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
2123 goto nla_put_failure;
2124
2125 if (rt->u.dst.neighbour)
2126 NLA_PUT(skb, RTA_GATEWAY, 16, &rt->u.dst.neighbour->primary_key);
2127
2128 if (rt->u.dst.dev)
2129 NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
2130
2131 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
2132
2133 expires = rt->rt6i_expires ? rt->rt6i_expires - jiffies : 0;
2134 if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0,
2135 expires, rt->u.dst.error) < 0)
2136 goto nla_put_failure;
2137
2138 return nlmsg_end(skb, nlh);
2139
2140 nla_put_failure:
2141 nlmsg_cancel(skb, nlh);
2142 return -EMSGSIZE;
2143 }
2144
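/* fib6 walker callback used for rtnetlink route dumps: honours the
 * RTM_F_PREFIX filter from the request, then hands each route to
 * rt6_fill_node() as part of a multi-part (NLM_F_MULTI) dump.
 */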
2145 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2146 {
2147 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2148 int prefix;
2149
2150 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2151 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2152 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2153 } else
2154 prefix = 0;
2155
2156 return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2157 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
2158 prefix, NLM_F_MULTI);
2159 }
2160
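/* RTM_GETROUTE handler: resolve a single destination and report the
 * selected route back to the requesting socket.  Like the other handlers
 * here, it refuses requests coming from anything but the initial network
 * namespace.  For example (assuming an iproute2 userspace),
 * "ip -6 route get 2001:db8::1" ends up in this function.
 */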
2161 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2162 {
2163 struct net *net = in_skb->sk->sk_net;
2164 struct nlattr *tb[RTA_MAX+1];
2165 struct rt6_info *rt;
2166 struct sk_buff *skb;
2167 struct rtmsg *rtm;
2168 struct flowi fl;
2169 int err, iif = 0;
2170
2171 if (net != &init_net)
2172 return -EINVAL;
2173
2174 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2175 if (err < 0)
2176 goto errout;
2177
2178 err = -EINVAL;
2179 memset(&fl, 0, sizeof(fl));
2180
2181 if (tb[RTA_SRC]) {
2182 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2183 goto errout;
2184
2185 ipv6_addr_copy(&fl.fl6_src, nla_data(tb[RTA_SRC]));
2186 }
2187
2188 if (tb[RTA_DST]) {
2189 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2190 goto errout;
2191
2192 ipv6_addr_copy(&fl.fl6_dst, nla_data(tb[RTA_DST]));
2193 }
2194
2195 if (tb[RTA_IIF])
2196 iif = nla_get_u32(tb[RTA_IIF]);
2197
2198 if (tb[RTA_OIF])
2199 fl.oif = nla_get_u32(tb[RTA_OIF]);
2200
2201 if (iif) {
2202 struct net_device *dev;
2203 dev = __dev_get_by_index(&init_net, iif);
2204 if (!dev) {
2205 err = -ENODEV;
2206 goto errout;
2207 }
2208 }
2209
2210 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2211 if (skb == NULL) {
2212 err = -ENOBUFS;
2213 goto errout;
2214 }
2215
2216 /* Reserve room for dummy headers; this skb can pass
2217 through a good chunk of the routing engine.
2218 */
2219 skb_reset_mac_header(skb);
2220 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2221
2222 rt = (struct rt6_info*) ip6_route_output(NULL, &fl);
2223 skb->dst = &rt->u.dst;
2224
2225 err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
2226 RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
2227 nlh->nlmsg_seq, 0, 0);
2228 if (err < 0) {
2229 kfree_skb(skb);
2230 goto errout;
2231 }
2232
2233 err = rtnl_unicast(skb, NETLINK_CB(in_skb).pid);
2234 errout:
2235 return err;
2236 }
2237
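/* Notify RTNLGRP_IPV6_ROUTE listeners about a route change.  @info, when
 * present, carries the pid/seq of the netlink request that triggered the
 * change so the notification can be correlated with it; on allocation or
 * fill failure the error is recorded via rtnl_set_sk_err() instead.
 */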
2238 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2239 {
2240 struct sk_buff *skb;
2241 u32 pid = 0, seq = 0;
2242 struct nlmsghdr *nlh = NULL;
2243 int err = -ENOBUFS;
2244
2245 if (info) {
2246 pid = info->pid;
2247 nlh = info->nlh;
2248 if (nlh)
2249 seq = nlh->nlmsg_seq;
2250 }
2251
2252 skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2253 if (skb == NULL)
2254 goto errout;
2255
2256 err = rt6_fill_node(skb, rt, NULL, NULL, 0, event, pid, seq, 0, 0);
2257 if (err < 0) {
2258 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2259 WARN_ON(err == -EMSGSIZE);
2260 kfree_skb(skb);
2261 goto errout;
2262 }
2263 err = rtnl_notify(skb, pid, RTNLGRP_IPV6_ROUTE, nlh, gfp_any());
2264 errout:
2265 if (err < 0)
2266 rtnl_set_sk_err(RTNLGRP_IPV6_ROUTE, err);
2267 }
2268
2269 /*
2270 * /proc
2271 */
2272
2273 #ifdef CONFIG_PROC_FS
2274
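/* NOTE: RT6_INFO_LEN and struct rt6_proc_arg below are not referenced
 * anywhere else in this file; they appear to be leftovers from the
 * pre-seq_file /proc implementation and could probably be removed.
 */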
2275 #define RT6_INFO_LEN (32 + 4 + 32 + 4 + 32 + 40 + 5 + 1)
2276
2277 struct rt6_proc_arg
2278 {
2279 char *buffer;
2280 int offset;
2281 int length;
2282 int skip;
2283 int len;
2284 };
2285
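/* Emit one line of /proc/net/ipv6_route per route: destination address
 * and prefix length, source address and prefix length (all zeroes unless
 * CONFIG_IPV6_SUBTREES), next hop, metric, reference count, use count and
 * flags, all in hex, followed by the output device name.
 */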
2286 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2287 {
2288 struct seq_file *m = p_arg;
2289
2290 seq_printf(m, NIP6_SEQFMT " %02x ", NIP6(rt->rt6i_dst.addr),
2291 rt->rt6i_dst.plen);
2292
2293 #ifdef CONFIG_IPV6_SUBTREES
2294 seq_printf(m, NIP6_SEQFMT " %02x ", NIP6(rt->rt6i_src.addr),
2295 rt->rt6i_src.plen);
2296 #else
2297 seq_puts(m, "00000000000000000000000000000000 00 ");
2298 #endif
2299
2300 if (rt->rt6i_nexthop) {
2301 seq_printf(m, NIP6_SEQFMT,
2302 NIP6(*((struct in6_addr *)rt->rt6i_nexthop->primary_key)));
2303 } else {
2304 seq_puts(m, "00000000000000000000000000000000");
2305 }
2306 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2307 rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt),
2308 rt->u.dst.__use, rt->rt6i_flags,
2309 rt->rt6i_dev ? rt->rt6i_dev->name : "");
2310 return 0;
2311 }
2312
2313 static int ipv6_route_show(struct seq_file *m, void *v)
2314 {
2315 fib6_clean_all(rt6_info_route, 0, m);
2316 return 0;
2317 }
2318
2319 static int ipv6_route_open(struct inode *inode, struct file *file)
2320 {
2321 return single_open(file, ipv6_route_show, NULL);
2322 }
2323
2324 static const struct file_operations ipv6_route_proc_fops = {
2325 .owner = THIS_MODULE,
2326 .open = ipv6_route_open,
2327 .read = seq_read,
2328 .llseek = seq_lseek,
2329 .release = single_release,
2330 };
2331
2332 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2333 {
2334 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2335 rt6_stats.fib_nodes, rt6_stats.fib_route_nodes,
2336 rt6_stats.fib_rt_alloc, rt6_stats.fib_rt_entries,
2337 rt6_stats.fib_rt_cache,
2338 atomic_read(&ip6_dst_ops.entries),
2339 rt6_stats.fib_discarded_routes);
2340
2341 return 0;
2342 }
2343
2344 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2345 {
2346 return single_open(file, rt6_stats_seq_show, NULL);
2347 }
2348
2349 static const struct file_operations rt6_stats_seq_fops = {
2350 .owner = THIS_MODULE,
2351 .open = rt6_stats_seq_open,
2352 .read = seq_read,
2353 .llseek = seq_lseek,
2354 .release = single_release,
2355 };
2356 #endif /* CONFIG_PROC_FS */
2357
2358 #ifdef CONFIG_SYSCTL
2359
2360 static int flush_delay;
2361
2362 static
2363 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
2364 void __user *buffer, size_t *lenp, loff_t *ppos)
2365 {
2366 if (write) {
2367 proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
2368 fib6_run_gc(flush_delay <= 0 ? ~0UL : (unsigned long)flush_delay);
2369 return 0;
2370 } else
2371 return -EINVAL;
2372 }
2373
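/* Route sysctls.  "flush" is write-only and forces an immediate garbage
 * collection run via the handler above; the remaining entries tune the
 * dst cache.  Assuming the table is registered under net.ipv6.route
 * (done by the sysctl setup code outside this file), a cache flush can be
 * requested with e.g.:
 *
 *	echo 1 > /proc/sys/net/ipv6/route/flush
 *
 * and the gc trigger threshold adjusted with:
 *
 *	echo 2048 > /proc/sys/net/ipv6/route/gc_thresh
 */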
2374 ctl_table ipv6_route_table[] = {
2375 {
2376 .procname = "flush",
2377 .data = &flush_delay,
2378 .maxlen = sizeof(int),
2379 .mode = 0200,
2380 .proc_handler = &ipv6_sysctl_rtcache_flush
2381 },
2382 {
2383 .ctl_name = NET_IPV6_ROUTE_GC_THRESH,
2384 .procname = "gc_thresh",
2385 .data = &ip6_dst_ops.gc_thresh,
2386 .maxlen = sizeof(int),
2387 .mode = 0644,
2388 .proc_handler = &proc_dointvec,
2389 },
2390 {
2391 .ctl_name = NET_IPV6_ROUTE_MAX_SIZE,
2392 .procname = "max_size",
2393 .data = &ip6_rt_max_size,
2394 .maxlen = sizeof(int),
2395 .mode = 0644,
2396 .proc_handler = &proc_dointvec,
2397 },
2398 {
2399 .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL,
2400 .procname = "gc_min_interval",
2401 .data = &ip6_rt_gc_min_interval,
2402 .maxlen = sizeof(int),
2403 .mode = 0644,
2404 .proc_handler = &proc_dointvec_jiffies,
2405 .strategy = &sysctl_jiffies,
2406 },
2407 {
2408 .ctl_name = NET_IPV6_ROUTE_GC_TIMEOUT,
2409 .procname = "gc_timeout",
2410 .data = &ip6_rt_gc_timeout,
2411 .maxlen = sizeof(int),
2412 .mode = 0644,
2413 .proc_handler = &proc_dointvec_jiffies,
2414 .strategy = &sysctl_jiffies,
2415 },
2416 {
2417 .ctl_name = NET_IPV6_ROUTE_GC_INTERVAL,
2418 .procname = "gc_interval",
2419 .data = &ip6_rt_gc_interval,
2420 .maxlen = sizeof(int),
2421 .mode = 0644,
2422 .proc_handler = &proc_dointvec_jiffies,
2423 .strategy = &sysctl_jiffies,
2424 },
2425 {
2426 .ctl_name = NET_IPV6_ROUTE_GC_ELASTICITY,
2427 .procname = "gc_elasticity",
2428 .data = &ip6_rt_gc_elasticity,
2429 .maxlen = sizeof(int),
2430 .mode = 0644,
2431 .proc_handler = &proc_dointvec, /* plain integer, not a jiffies value */
2432 .strategy = &sysctl_intvec,
2433 },
2434 {
2435 .ctl_name = NET_IPV6_ROUTE_MTU_EXPIRES,
2436 .procname = "mtu_expires",
2437 .data = &ip6_rt_mtu_expires,
2438 .maxlen = sizeof(int),
2439 .mode = 0644,
2440 .proc_handler = &proc_dointvec_jiffies,
2441 .strategy = &sysctl_jiffies,
2442 },
2443 {
2444 .ctl_name = NET_IPV6_ROUTE_MIN_ADVMSS,
2445 .procname = "min_adv_mss",
2446 .data = &ip6_rt_min_advmss,
2447 .maxlen = sizeof(int),
2448 .mode = 0644,
2449 .proc_handler = &proc_dointvec, /* bytes, not a jiffies value */
2450 .strategy = &sysctl_intvec,
2451 },
2452 {
2453 .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS,
2454 .procname = "gc_min_interval_ms",
2455 .data = &ip6_rt_gc_min_interval,
2456 .maxlen = sizeof(int),
2457 .mode = 0644,
2458 .proc_handler = &proc_dointvec_ms_jiffies,
2459 .strategy = &sysctl_ms_jiffies,
2460 },
2461 { .ctl_name = 0 }
2462 };
2463
2464 #endif
2465
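/* Boot-time setup: create the rt6_info slab cache (shared with the
 * blackhole dst ops), initialise the FIB, create the /proc entries, bring
 * up xfrm6 and policy-routing rules where configured, and finally
 * register the rtnetlink handlers above, all of which only accept
 * requests from the initial network namespace.
 */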
2466 void __init ip6_route_init(void)
2467 {
2468 ip6_dst_ops.kmem_cachep =
2469 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
2470 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2471 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops.kmem_cachep;
2472
2473 fib6_init();
2474 proc_net_fops_create(&init_net, "ipv6_route", 0, &ipv6_route_proc_fops);
2475 proc_net_fops_create(&init_net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2476 #ifdef CONFIG_XFRM
2477 xfrm6_init();
2478 #endif
2479 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2480 fib6_rules_init();
2481 #endif
2482
2483 __rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL);
2484 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL);
2485 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL);
2486 }
2487
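/* Tear down everything ip6_route_init() set up; used on the IPv6
 * protocol exit path.
 */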
2488 void ip6_route_cleanup(void)
2489 {
2490 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2491 fib6_rules_cleanup();
2492 #endif
2493 #ifdef CONFIG_PROC_FS
2494 proc_net_remove(&init_net, "ipv6_route");
2495 proc_net_remove(&init_net, "rt6_stats");
2496 #endif
2497 #ifdef CONFIG_XFRM
2498 xfrm6_fini();
2499 #endif
2500 rt6_ifdown(NULL);
2501 fib6_gc_cleanup();
2502 kmem_cache_destroy(ip6_dst_ops.kmem_cachep);
2503 }