net/ipv6/route.c (Linux v3.10.69, android_kernel_alcatel_ttab)
1 /*
2 * Linux INET6 implementation
3 * FIB front-end.
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14 /* Changes:
15 *
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable. otherwise, round-robin the list.
23 * Ville Nuorvala
24 * Fixed routing subtrees.
25 */
26
27 #define pr_fmt(fmt) "IPv6: " fmt
28
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/types.h>
33 #include <linux/times.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/net.h>
37 #include <linux/route.h>
38 #include <linux/netdevice.h>
39 #include <linux/in6.h>
40 #include <linux/mroute6.h>
41 #include <linux/init.h>
42 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/nsproxy.h>
46 #include <linux/slab.h>
47 #include <net/net_namespace.h>
48 #include <net/snmp.h>
49 #include <net/ipv6.h>
50 #include <net/ip6_fib.h>
51 #include <net/ip6_route.h>
52 #include <net/ndisc.h>
53 #include <net/addrconf.h>
54 #include <net/tcp.h>
55 #include <linux/rtnetlink.h>
56 #include <net/dst.h>
57 #include <net/xfrm.h>
58 #include <net/netevent.h>
59 #include <net/netlink.h>
60 #include <net/nexthop.h>
61
62 #include <asm/uaccess.h>
63
64 #ifdef CONFIG_SYSCTL
65 #include <linux/sysctl.h>
66 #endif
67
68 enum rt6_nud_state {
69 RT6_NUD_FAIL_HARD = -2,
70 RT6_NUD_FAIL_SOFT = -1,
71 RT6_NUD_SUCCEED = 1
72 };
73
74 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
75 const struct in6_addr *dest);
76 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
77 static unsigned int ip6_default_advmss(const struct dst_entry *dst);
78 static unsigned int ip6_mtu(const struct dst_entry *dst);
79 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
80 static void ip6_dst_destroy(struct dst_entry *);
81 static void ip6_dst_ifdown(struct dst_entry *,
82 struct net_device *dev, int how);
83 static int ip6_dst_gc(struct dst_ops *ops);
84
85 static int ip6_pkt_discard(struct sk_buff *skb);
86 static int ip6_pkt_discard_out(struct sk_buff *skb);
87 static int ip6_pkt_prohibit(struct sk_buff *skb);
88 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
89 static void ip6_link_failure(struct sk_buff *skb);
90 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
91 struct sk_buff *skb, u32 mtu);
92 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
93 struct sk_buff *skb);
94
95 #ifdef CONFIG_IPV6_ROUTE_INFO
96 static struct rt6_info *rt6_add_route_info(struct net_device *dev,
97 const struct in6_addr *prefix, int prefixlen,
98 const struct in6_addr *gwaddr, unsigned int pref);
99 static struct rt6_info *rt6_get_route_info(struct net_device *dev,
100 const struct in6_addr *prefix, int prefixlen,
101 const struct in6_addr *gwaddr);
102 #endif
103
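/*
 * Copy-on-write metrics for host routes: writable metrics are stored in
 * the inet_peer entry for the destination.  On the first write the
 * read-only template metrics are copied into the peer and dst->_metrics
 * is switched over with cmpxchg(); if another CPU wins the race and the
 * winning pointer is still read-only, NULL is returned.
 */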
104 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
105 {
106 struct rt6_info *rt = (struct rt6_info *) dst;
107 struct inet_peer *peer;
108 u32 *p = NULL;
109
110 if (!(rt->dst.flags & DST_HOST))
111 return NULL;
112
113 peer = rt6_get_peer_create(rt);
114 if (peer) {
115 u32 *old_p = __DST_METRICS_PTR(old);
116 unsigned long prev, new;
117
118 p = peer->metrics;
119 if (inet_metrics_new(peer))
120 memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
121
122 new = (unsigned long) p;
123 prev = cmpxchg(&dst->_metrics, old, new);
124
125 if (prev != old) {
126 p = __DST_METRICS_PTR(prev);
127 if (prev & DST_METRICS_READ_ONLY)
128 p = NULL;
129 }
130 }
131 return p;
132 }
133
134 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
135 struct sk_buff *skb,
136 const void *daddr)
137 {
138 struct in6_addr *p = &rt->rt6i_gateway;
139
140 if (!ipv6_addr_any(p))
141 return (const void *) p;
142 else if (skb)
143 return &ipv6_hdr(skb)->daddr;
144 return daddr;
145 }
146
147 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
148 struct sk_buff *skb,
149 const void *daddr)
150 {
151 struct rt6_info *rt = (struct rt6_info *) dst;
152 struct neighbour *n;
153
154 daddr = choose_neigh_daddr(rt, skb, daddr);
155 n = __ipv6_neigh_lookup(dst->dev, daddr);
156 if (n)
157 return n;
158 return neigh_create(&nd_tbl, daddr, dst->dev);
159 }
160
161 static struct dst_ops ip6_dst_ops_template = {
162 .family = AF_INET6,
163 .protocol = cpu_to_be16(ETH_P_IPV6),
164 .gc = ip6_dst_gc,
165 .gc_thresh = 1024,
166 .check = ip6_dst_check,
167 .default_advmss = ip6_default_advmss,
168 .mtu = ip6_mtu,
169 .cow_metrics = ipv6_cow_metrics,
170 .destroy = ip6_dst_destroy,
171 .ifdown = ip6_dst_ifdown,
172 .negative_advice = ip6_negative_advice,
173 .link_failure = ip6_link_failure,
174 .update_pmtu = ip6_rt_update_pmtu,
175 .redirect = rt6_do_redirect,
176 .local_out = __ip6_local_out,
177 .neigh_lookup = ip6_neigh_lookup,
178 };
179
180 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
181 {
182 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
183
184 return mtu ? : dst->dev->mtu;
185 }
186
187 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
188 struct sk_buff *skb, u32 mtu)
189 {
190 }
191
192 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
193 struct sk_buff *skb)
194 {
195 }
196
197 static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
198 unsigned long old)
199 {
200 return NULL;
201 }
202
203 static struct dst_ops ip6_dst_blackhole_ops = {
204 .family = AF_INET6,
205 .protocol = cpu_to_be16(ETH_P_IPV6),
206 .destroy = ip6_dst_destroy,
207 .check = ip6_dst_check,
208 .mtu = ip6_blackhole_mtu,
209 .default_advmss = ip6_default_advmss,
210 .update_pmtu = ip6_rt_blackhole_update_pmtu,
211 .redirect = ip6_rt_blackhole_redirect,
212 .cow_metrics = ip6_rt_blackhole_cow_metrics,
213 .neigh_lookup = ip6_neigh_lookup,
214 };
215
216 static const u32 ip6_template_metrics[RTAX_MAX] = {
217 [RTAX_HOPLIMIT - 1] = 0,
218 };
219
220 static const struct rt6_info ip6_null_entry_template = {
221 .dst = {
222 .__refcnt = ATOMIC_INIT(1),
223 .__use = 1,
224 .obsolete = DST_OBSOLETE_FORCE_CHK,
225 .error = -ENETUNREACH,
226 .input = ip6_pkt_discard,
227 .output = ip6_pkt_discard_out,
228 },
229 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
230 .rt6i_protocol = RTPROT_KERNEL,
231 .rt6i_metric = ~(u32) 0,
232 .rt6i_ref = ATOMIC_INIT(1),
233 };
234
235 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
236
237 static const struct rt6_info ip6_prohibit_entry_template = {
238 .dst = {
239 .__refcnt = ATOMIC_INIT(1),
240 .__use = 1,
241 .obsolete = DST_OBSOLETE_FORCE_CHK,
242 .error = -EACCES,
243 .input = ip6_pkt_prohibit,
244 .output = ip6_pkt_prohibit_out,
245 },
246 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
247 .rt6i_protocol = RTPROT_KERNEL,
248 .rt6i_metric = ~(u32) 0,
249 .rt6i_ref = ATOMIC_INIT(1),
250 };
251
252 static const struct rt6_info ip6_blk_hole_entry_template = {
253 .dst = {
254 .__refcnt = ATOMIC_INIT(1),
255 .__use = 1,
256 .obsolete = DST_OBSOLETE_FORCE_CHK,
257 .error = -EINVAL,
258 .input = dst_discard,
259 .output = dst_discard,
260 },
261 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
262 .rt6i_protocol = RTPROT_KERNEL,
263 .rt6i_metric = ~(u32) 0,
264 .rt6i_ref = ATOMIC_INIT(1),
265 };
266
267 #endif
268
269 /* allocate dst with ip6_dst_ops */
270 static inline struct rt6_info *ip6_dst_alloc(struct net *net,
271 struct net_device *dev,
272 int flags,
273 struct fib6_table *table)
274 {
275 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
276 0, DST_OBSOLETE_FORCE_CHK, flags);
277
278 if (rt) {
279 struct dst_entry *dst = &rt->dst;
280
281 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
282 rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
283 rt->rt6i_genid = rt_genid(net);
284 INIT_LIST_HEAD(&rt->rt6i_siblings);
285 rt->rt6i_nsiblings = 0;
286 }
287 return rt;
288 }
289
290 static void ip6_dst_destroy(struct dst_entry *dst)
291 {
292 struct rt6_info *rt = (struct rt6_info *)dst;
293 struct inet6_dev *idev = rt->rt6i_idev;
294 struct dst_entry *from = dst->from;
295
296 if (!(rt->dst.flags & DST_HOST))
297 dst_destroy_metrics_generic(dst);
298
299 if (idev) {
300 rt->rt6i_idev = NULL;
301 in6_dev_put(idev);
302 }
303
304 dst->from = NULL;
305 dst_release(from);
306
307 if (rt6_has_peer(rt)) {
308 struct inet_peer *peer = rt6_peer_ptr(rt);
309 inet_putpeer(peer);
310 }
311 }
312
313 void rt6_bind_peer(struct rt6_info *rt, int create)
314 {
315 struct inet_peer_base *base;
316 struct inet_peer *peer;
317
318 base = inetpeer_base_ptr(rt->_rt6i_peer);
319 if (!base)
320 return;
321
322 peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
323 if (peer) {
324 if (!rt6_set_peer(rt, peer))
325 inet_putpeer(peer);
326 }
327 }
328
329 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
330 int how)
331 {
332 struct rt6_info *rt = (struct rt6_info *)dst;
333 struct inet6_dev *idev = rt->rt6i_idev;
334 struct net_device *loopback_dev =
335 dev_net(dev)->loopback_dev;
336
337 if (dev != loopback_dev) {
338 if (idev && idev->dev == dev) {
339 struct inet6_dev *loopback_idev =
340 in6_dev_get(loopback_dev);
341 if (loopback_idev) {
342 rt->rt6i_idev = loopback_idev;
343 in6_dev_put(idev);
344 }
345 }
346 }
347 }
348
349 static bool rt6_check_expired(const struct rt6_info *rt)
350 {
351 if (rt->rt6i_flags & RTF_EXPIRES) {
352 if (time_after(jiffies, rt->dst.expires))
353 return true;
354 } else if (rt->dst.from) {
355 return rt6_check_expired((struct rt6_info *) rt->dst.from);
356 }
357 return false;
358 }
359
360 static bool rt6_need_strict(const struct in6_addr *daddr)
361 {
362 return ipv6_addr_type(daddr) &
363 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
364 }
365
366 /* Multipath route selection:
367 * Hash based function using packet header and flowlabel.
368 * Adapted from fib_info_hashfn()
369 */
370 static int rt6_info_hash_nhsfn(unsigned int candidate_count,
371 const struct flowi6 *fl6)
372 {
373 unsigned int val = fl6->flowi6_proto;
374
375 val ^= ipv6_addr_hash(&fl6->daddr);
376 val ^= ipv6_addr_hash(&fl6->saddr);
377
378 	/* Works only if this is not encapsulated. */
379 switch (fl6->flowi6_proto) {
380 case IPPROTO_UDP:
381 case IPPROTO_TCP:
382 case IPPROTO_SCTP:
383 val ^= (__force u16)fl6->fl6_sport;
384 val ^= (__force u16)fl6->fl6_dport;
385 break;
386
387 case IPPROTO_ICMPV6:
388 val ^= (__force u16)fl6->fl6_icmp_type;
389 val ^= (__force u16)fl6->fl6_icmp_code;
390 break;
391 }
392 	/* RFC 6438 recommends using the flow label. */
393 val ^= (__force u32)fl6->flowlabel;
394
395 	/* Perhaps we need to tune this function? */
396 val = val ^ (val >> 7) ^ (val >> 12);
397 return val % candidate_count;
398 }
399
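/*
 * ECMP selection: the flow hash above is reduced modulo the number of
 * equal-cost routes (the matched route plus its siblings).  An index of
 * 0 keeps the matched route; otherwise we walk that many entries into
 * the sibling list.
 */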
400 static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
401 struct flowi6 *fl6)
402 {
403 struct rt6_info *sibling, *next_sibling;
404 int route_choosen;
405
406 route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
407 	/* Don't change the route if route_choosen == 0
408 	 * (the sibling list does not include ourselves).
409 	 */
410 if (route_choosen)
411 list_for_each_entry_safe(sibling, next_sibling,
412 &match->rt6i_siblings, rt6i_siblings) {
413 route_choosen--;
414 if (route_choosen == 0) {
415 match = sibling;
416 break;
417 }
418 }
419 return match;
420 }
421
422 /*
423 * Route lookup. Any table->tb6_lock is implied.
424 */
425
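/*
 * Pick the route in this leaf chain that matches the requested outgoing
 * interface and/or source address:
 *  - with an oif, a route whose device matches wins; a loopback route
 *    may be remembered as a fallback;
 *  - without an oif, the first route whose device owns saddr wins;
 *  - otherwise return the original head (or the null entry when an oif
 *    was given, RT6_LOOKUP_F_IFACE is set and nothing matched).
 */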
426 static inline struct rt6_info *rt6_device_match(struct net *net,
427 struct rt6_info *rt,
428 const struct in6_addr *saddr,
429 int oif,
430 int flags)
431 {
432 struct rt6_info *local = NULL;
433 struct rt6_info *sprt;
434
435 if (!oif && ipv6_addr_any(saddr))
436 goto out;
437
438 for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
439 struct net_device *dev = sprt->dst.dev;
440
441 if (oif) {
442 if (dev->ifindex == oif)
443 return sprt;
444 if (dev->flags & IFF_LOOPBACK) {
445 if (!sprt->rt6i_idev ||
446 sprt->rt6i_idev->dev->ifindex != oif) {
447 if (flags & RT6_LOOKUP_F_IFACE && oif)
448 continue;
449 if (local && (!oif ||
450 local->rt6i_idev->dev->ifindex == oif))
451 continue;
452 }
453 local = sprt;
454 }
455 } else {
456 if (ipv6_chk_addr(net, saddr, dev,
457 flags & RT6_LOOKUP_F_IFACE))
458 return sprt;
459 }
460 }
461
462 if (oif) {
463 if (local)
464 return local;
465
466 if (flags & RT6_LOOKUP_F_IFACE)
467 return net->ipv6.ip6_null_entry;
468 }
469 out:
470 return rt;
471 }
472
473 #ifdef CONFIG_IPV6_ROUTER_PREF
474 struct __rt6_probe_work {
475 struct work_struct work;
476 struct in6_addr target;
477 struct net_device *dev;
478 };
479
480 static void rt6_probe_deferred(struct work_struct *w)
481 {
482 struct in6_addr mcaddr;
483 struct __rt6_probe_work *work =
484 container_of(w, struct __rt6_probe_work, work);
485
486 addrconf_addr_solict_mult(&work->target, &mcaddr);
487 ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
488 dev_put(work->dev);
489 kfree(w);
490 }
491
492 static void rt6_probe(struct rt6_info *rt)
493 {
494 struct neighbour *neigh;
495 /*
496 * Okay, this does not seem to be appropriate
497 * for now, however, we need to check if it
498 * is really so; aka Router Reachability Probing.
499 *
500 * Router Reachability Probe MUST be rate-limited
501 * to no more than one per minute.
502 */
503 if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
504 return;
505 rcu_read_lock_bh();
506 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
507 if (neigh) {
508 write_lock(&neigh->lock);
509 if (neigh->nud_state & NUD_VALID)
510 goto out;
511 }
512
513 if (!neigh ||
514 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
515 struct __rt6_probe_work *work;
516
517 work = kmalloc(sizeof(*work), GFP_ATOMIC);
518
519 if (neigh && work)
520 neigh->updated = jiffies;
521
522 if (neigh)
523 write_unlock(&neigh->lock);
524
525 if (work) {
526 INIT_WORK(&work->work, rt6_probe_deferred);
527 work->target = rt->rt6i_gateway;
528 dev_hold(rt->dst.dev);
529 work->dev = rt->dst.dev;
530 schedule_work(&work->work);
531 }
532 } else {
533 out:
534 write_unlock(&neigh->lock);
535 }
536 rcu_read_unlock_bh();
537 }
538 #else
539 static inline void rt6_probe(struct rt6_info *rt)
540 {
541 }
542 #endif
543
544 /*
545 * Default Router Selection (RFC 2461 6.3.6)
546 */
547 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
548 {
549 struct net_device *dev = rt->dst.dev;
550 if (!oif || dev->ifindex == oif)
551 return 2;
552 if ((dev->flags & IFF_LOOPBACK) &&
553 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
554 return 1;
555 return 0;
556 }
557
558 static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
559 {
560 struct neighbour *neigh;
561 enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
562
563 if (rt->rt6i_flags & RTF_NONEXTHOP ||
564 !(rt->rt6i_flags & RTF_GATEWAY))
565 return RT6_NUD_SUCCEED;
566
567 rcu_read_lock_bh();
568 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
569 if (neigh) {
570 read_lock(&neigh->lock);
571 if (neigh->nud_state & NUD_VALID)
572 ret = RT6_NUD_SUCCEED;
573 #ifdef CONFIG_IPV6_ROUTER_PREF
574 else if (!(neigh->nud_state & NUD_FAILED))
575 ret = RT6_NUD_SUCCEED;
576 #endif
577 read_unlock(&neigh->lock);
578 } else {
579 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
580 RT6_NUD_SUCCEED : RT6_NUD_FAIL_SOFT;
581 }
582 rcu_read_unlock_bh();
583
584 return ret;
585 }
586
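/*
 * Score a candidate router: 2 if it uses the requested interface (or no
 * interface was requested), 1 for a loopback route bound to that
 * interface, 0 otherwise.  With CONFIG_IPV6_ROUTER_PREF the decoded RA
 * router preference is OR'd in shifted left by two bits.  When
 * reachability is required, a negative rt6_check_neigh() result is
 * returned instead of the score.
 */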
587 static int rt6_score_route(struct rt6_info *rt, int oif,
588 int strict)
589 {
590 int m;
591
592 m = rt6_check_dev(rt, oif);
593 if (!m && (strict & RT6_LOOKUP_F_IFACE))
594 return RT6_NUD_FAIL_HARD;
595 #ifdef CONFIG_IPV6_ROUTER_PREF
596 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
597 #endif
598 if (strict & RT6_LOOKUP_F_REACHABLE) {
599 int n = rt6_check_neigh(rt);
600 if (n < 0)
601 return n;
602 }
603 return m;
604 }
605
606 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
607 int *mpri, struct rt6_info *match,
608 bool *do_rr)
609 {
610 int m;
611 bool match_do_rr = false;
612
613 if (rt6_check_expired(rt))
614 goto out;
615
616 m = rt6_score_route(rt, oif, strict);
617 if (m == RT6_NUD_FAIL_SOFT && !IS_ENABLED(CONFIG_IPV6_ROUTER_PREF)) {
618 match_do_rr = true;
619 m = 0; /* lowest valid score */
620 } else if (m < 0) {
621 goto out;
622 }
623
624 if (strict & RT6_LOOKUP_F_REACHABLE)
625 rt6_probe(rt);
626
627 if (m > *mpri) {
628 *do_rr = match_do_rr;
629 *mpri = m;
630 match = rt;
631 }
632 out:
633 return match;
634 }
635
636 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
637 struct rt6_info *rr_head,
638 u32 metric, int oif, int strict,
639 bool *do_rr)
640 {
641 struct rt6_info *rt, *match;
642 int mpri = -1;
643
644 match = NULL;
645 for (rt = rr_head; rt && rt->rt6i_metric == metric;
646 rt = rt->dst.rt6_next)
647 match = find_match(rt, oif, strict, &mpri, match, do_rr);
648 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
649 rt = rt->dst.rt6_next)
650 match = find_match(rt, oif, strict, &mpri, match, do_rr);
651
652 return match;
653 }
654
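/*
 * Round-robin default router selection: scan the routes with the same
 * metric as fn->rr_ptr (wrapping around via fn->leaf) and keep the
 * best-scoring one.  If the chosen route's neighbour state is unknown
 * (soft NUD failure with router preference disabled), advance rr_ptr to
 * the next equal-metric route so later lookups rotate through the
 * candidates.
 */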
655 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
656 {
657 struct rt6_info *match, *rt0;
658 struct net *net;
659 bool do_rr = false;
660
661 rt0 = fn->rr_ptr;
662 if (!rt0)
663 fn->rr_ptr = rt0 = fn->leaf;
664
665 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
666 &do_rr);
667
668 if (do_rr) {
669 struct rt6_info *next = rt0->dst.rt6_next;
670
671 /* no entries matched; do round-robin */
672 if (!next || next->rt6i_metric != rt0->rt6i_metric)
673 next = fn->leaf;
674
675 if (next != rt0)
676 fn->rr_ptr = next;
677 }
678
679 net = dev_net(rt0->dst.dev);
680 return match ? match : net->ipv6.ip6_null_entry;
681 }
682
683 #ifdef CONFIG_IPV6_ROUTE_INFO
684 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
685 const struct in6_addr *gwaddr)
686 {
687 struct route_info *rinfo = (struct route_info *) opt;
688 struct in6_addr prefix_buf, *prefix;
689 unsigned int pref;
690 unsigned long lifetime;
691 struct rt6_info *rt;
692
693 if (len < sizeof(struct route_info)) {
694 return -EINVAL;
695 }
696
697 /* Sanity check for prefix_len and length */
698 if (rinfo->length > 3) {
699 return -EINVAL;
700 } else if (rinfo->prefix_len > 128) {
701 return -EINVAL;
702 } else if (rinfo->prefix_len > 64) {
703 if (rinfo->length < 2) {
704 return -EINVAL;
705 }
706 } else if (rinfo->prefix_len > 0) {
707 if (rinfo->length < 1) {
708 return -EINVAL;
709 }
710 }
711
712 pref = rinfo->route_pref;
713 if (pref == ICMPV6_ROUTER_PREF_INVALID)
714 return -EINVAL;
715
716 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
717
718 if (rinfo->length == 3)
719 prefix = (struct in6_addr *)rinfo->prefix;
720 else {
721 /* this function is safe */
722 ipv6_addr_prefix(&prefix_buf,
723 (struct in6_addr *)rinfo->prefix,
724 rinfo->prefix_len);
725 prefix = &prefix_buf;
726 }
727
728 if (rinfo->prefix_len == 0)
729 rt = rt6_get_dflt_router(gwaddr, dev);
730 else
731 rt = rt6_get_route_info(dev, prefix, rinfo->prefix_len, gwaddr);
732
733 if (rt && !lifetime) {
734 ip6_del_rt(rt);
735 rt = NULL;
736 }
737
738 if (!rt && lifetime)
739 rt = rt6_add_route_info(dev, prefix, rinfo->prefix_len, gwaddr, pref);
740 else if (rt)
741 rt->rt6i_flags = RTF_ROUTEINFO |
742 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
743
744 if (rt) {
745 if (!addrconf_finite_timeout(lifetime))
746 rt6_clean_expires(rt);
747 else
748 rt6_set_expires(rt, jiffies + HZ * lifetime);
749
750 ip6_rt_put(rt);
751 }
752 return 0;
753 }
754 #endif
755
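/*
 * BACKTRACK: if the lookup landed on the null entry, walk back up the
 * fib6 tree.  At each step, descend into the parent's source-address
 * subtree (if there is one and we are not already in it) or move to the
 * parent itself; restart the lookup at the first node that carries
 * routes (RTN_RTINFO), or give up at the tree root.
 */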
756 #define BACKTRACK(__net, saddr) \
757 do { \
758 if (rt == __net->ipv6.ip6_null_entry) { \
759 struct fib6_node *pn; \
760 while (1) { \
761 if (fn->fn_flags & RTN_TL_ROOT) \
762 goto out; \
763 pn = fn->parent; \
764 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
765 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
766 else \
767 fn = pn; \
768 if (fn->fn_flags & RTN_RTINFO) \
769 goto restart; \
770 } \
771 } \
772 } while (0)
773
774 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
775 struct fib6_table *table,
776 struct flowi6 *fl6, int flags)
777 {
778 struct fib6_node *fn;
779 struct rt6_info *rt;
780
781 read_lock_bh(&table->tb6_lock);
782 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
783 restart:
784 rt = fn->leaf;
785 rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
786 if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
787 rt = rt6_multipath_select(rt, fl6);
788 BACKTRACK(net, &fl6->saddr);
789 out:
790 dst_use(&rt->dst, jiffies);
791 read_unlock_bh(&table->tb6_lock);
792 return rt;
793
794 }
795
796 struct dst_entry * ip6_route_lookup(struct net *net, struct flowi6 *fl6,
797 int flags)
798 {
799 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
800 }
801 EXPORT_SYMBOL_GPL(ip6_route_lookup);
802
803 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
804 const struct in6_addr *saddr, int oif, int strict)
805 {
806 struct flowi6 fl6 = {
807 .flowi6_oif = oif,
808 .daddr = *daddr,
809 };
810 struct dst_entry *dst;
811 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
812
813 if (saddr) {
814 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
815 flags |= RT6_LOOKUP_F_HAS_SADDR;
816 }
817
818 dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
819 if (dst->error == 0)
820 return (struct rt6_info *) dst;
821
822 dst_release(dst);
823
824 return NULL;
825 }
826
827 EXPORT_SYMBOL(rt6_lookup);
828
829 /* ip6_ins_rt is called with table->tb6_lock NOT held.
830    It takes a new route entry; if the addition fails for any reason,
831    the route is freed. In any case, if the caller does not hold a
832    reference to it, it may be destroyed.
833  */
834
835 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
836 {
837 int err;
838 struct fib6_table *table;
839
840 table = rt->rt6i_table;
841 write_lock_bh(&table->tb6_lock);
842 err = fib6_add(&table->tb6_root, rt, info);
843 write_unlock_bh(&table->tb6_lock);
844
845 return err;
846 }
847
848 int ip6_ins_rt(struct rt6_info *rt)
849 {
850 struct nl_info info = {
851 .nl_net = dev_net(rt->dst.dev),
852 };
853 return __ip6_ins_rt(rt, &info);
854 }
855
856 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
857 const struct in6_addr *daddr,
858 const struct in6_addr *saddr)
859 {
860 struct rt6_info *rt;
861
862 /*
863 * Clone the route.
864 */
865
866 rt = ip6_rt_copy(ort, daddr);
867
868 if (rt) {
869 if (!(rt->rt6i_flags & RTF_GATEWAY)) {
870 if (ort->rt6i_dst.plen != 128 &&
871 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
872 rt->rt6i_flags |= RTF_ANYCAST;
873 }
874
875 rt->rt6i_flags |= RTF_CACHE;
876
877 #ifdef CONFIG_IPV6_SUBTREES
878 if (rt->rt6i_src.plen && saddr) {
879 rt->rt6i_src.addr = *saddr;
880 rt->rt6i_src.plen = 128;
881 }
882 #endif
883 }
884
885 return rt;
886 }
887
888 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
889 const struct in6_addr *daddr)
890 {
891 struct rt6_info *rt = ip6_rt_copy(ort, daddr);
892
893 if (rt)
894 rt->rt6i_flags |= RTF_CACHE;
895 return rt;
896 }
897
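/*
 * ip6_pol_route(): select a route under tb6_lock, then, unless it is
 * the null entry or already an RTF_CACHE clone, create a per-destination
 * clone outside the lock: rt6_alloc_cow() for routes with neither
 * RTF_GATEWAY nor RTF_NONEXTHOP (binding the destination as a /128),
 * rt6_alloc_clone() for non-host routes with a nexthop.  The clone is
 * then inserted into the table; if the insert fails (another CPU raced
 * us), the lookup is retried, up to three attempts.
 */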
898 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
899 struct flowi6 *fl6, int flags)
900 {
901 struct fib6_node *fn;
902 struct rt6_info *rt, *nrt;
903 int strict = 0;
904 int attempts = 3;
905 int err;
906 int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
907
908 strict |= flags & RT6_LOOKUP_F_IFACE;
909
910 relookup:
911 read_lock_bh(&table->tb6_lock);
912
913 restart_2:
914 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
915
916 restart:
917 rt = rt6_select(fn, oif, strict | reachable);
918 if (rt->rt6i_nsiblings && oif == 0)
919 rt = rt6_multipath_select(rt, fl6);
920 BACKTRACK(net, &fl6->saddr);
921 if (rt == net->ipv6.ip6_null_entry ||
922 rt->rt6i_flags & RTF_CACHE)
923 goto out;
924
925 dst_hold(&rt->dst);
926 read_unlock_bh(&table->tb6_lock);
927
928 if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)))
929 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
930 else if (!(rt->dst.flags & DST_HOST))
931 nrt = rt6_alloc_clone(rt, &fl6->daddr);
932 else
933 goto out2;
934
935 ip6_rt_put(rt);
936 rt = nrt ? : net->ipv6.ip6_null_entry;
937
938 dst_hold(&rt->dst);
939 if (nrt) {
940 err = ip6_ins_rt(nrt);
941 if (!err)
942 goto out2;
943 }
944
945 if (--attempts <= 0)
946 goto out2;
947
948 /*
949 * Race condition! In the gap, when table->tb6_lock was
950 * released someone could insert this route. Relookup.
951 */
952 ip6_rt_put(rt);
953 goto relookup;
954
955 out:
956 if (reachable) {
957 reachable = 0;
958 goto restart_2;
959 }
960 dst_hold(&rt->dst);
961 read_unlock_bh(&table->tb6_lock);
962 out2:
963 rt->dst.lastuse = jiffies;
964 rt->dst.__use++;
965
966 return rt;
967 }
968
969 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
970 struct flowi6 *fl6, int flags)
971 {
972 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
973 }
974
975 static struct dst_entry *ip6_route_input_lookup(struct net *net,
976 struct net_device *dev,
977 struct flowi6 *fl6, int flags)
978 {
979 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
980 flags |= RT6_LOOKUP_F_IFACE;
981
982 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
983 }
984
985 void ip6_route_input(struct sk_buff *skb)
986 {
987 const struct ipv6hdr *iph = ipv6_hdr(skb);
988 struct net *net = dev_net(skb->dev);
989 int flags = RT6_LOOKUP_F_HAS_SADDR;
990 struct flowi6 fl6 = {
991 .flowi6_iif = skb->dev->ifindex,
992 .daddr = iph->daddr,
993 .saddr = iph->saddr,
994 .flowlabel = ip6_flowinfo(iph),
995 .flowi6_mark = skb->mark,
996 .flowi6_proto = iph->nexthdr,
997 };
998
999 skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
1000 }
1001
1002 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
1003 struct flowi6 *fl6, int flags)
1004 {
1005 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
1006 }
1007
1008 struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
1009 struct flowi6 *fl6)
1010 {
1011 int flags = 0;
1012
1013 fl6->flowi6_iif = LOOPBACK_IFINDEX;
1014
1015 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
1016 flags |= RT6_LOOKUP_F_IFACE;
1017
1018 if (!ipv6_addr_any(&fl6->saddr))
1019 flags |= RT6_LOOKUP_F_HAS_SADDR;
1020 else if (sk)
1021 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
1022
1023 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
1024 }
1025
1026 EXPORT_SYMBOL(ip6_route_output);
1027
1028 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
1029 {
1030 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
1031 struct dst_entry *new = NULL;
1032
1033 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
1034 if (rt) {
1035 new = &rt->dst;
1036
1037 memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
1038 rt6_init_peer(rt, net->ipv6.peers);
1039
1040 new->__use = 1;
1041 new->input = dst_discard;
1042 new->output = dst_discard;
1043
1044 if (dst_metrics_read_only(&ort->dst))
1045 new->_metrics = ort->dst._metrics;
1046 else
1047 dst_copy_metrics(new, &ort->dst);
1048 rt->rt6i_idev = ort->rt6i_idev;
1049 if (rt->rt6i_idev)
1050 in6_dev_hold(rt->rt6i_idev);
1051
1052 rt->rt6i_gateway = ort->rt6i_gateway;
1053 rt->rt6i_flags = ort->rt6i_flags;
1054 rt->rt6i_metric = 0;
1055
1056 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1057 #ifdef CONFIG_IPV6_SUBTREES
1058 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1059 #endif
1060
1061 dst_free(new);
1062 }
1063
1064 dst_release(dst_orig);
1065 return new ? new : ERR_PTR(-ENOMEM);
1066 }
1067
1068 /*
1069 * Destination cache support functions
1070 */
1071
1072 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1073 {
1074 struct rt6_info *rt;
1075
1076 rt = (struct rt6_info *) dst;
1077
1078 /* All IPV6 dsts are created with ->obsolete set to the value
1079 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1080 * into this function always.
1081 */
1082 if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev)))
1083 return NULL;
1084
1085 if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
1086 return NULL;
1087
1088 if (rt6_check_expired(rt))
1089 return NULL;
1090
1091 return dst;
1092 }
1093
1094 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
1095 {
1096 struct rt6_info *rt = (struct rt6_info *) dst;
1097
1098 if (rt) {
1099 if (rt->rt6i_flags & RTF_CACHE) {
1100 if (rt6_check_expired(rt)) {
1101 ip6_del_rt(rt);
1102 dst = NULL;
1103 }
1104 } else {
1105 dst_release(dst);
1106 dst = NULL;
1107 }
1108 }
1109 return dst;
1110 }
1111
1112 static void ip6_link_failure(struct sk_buff *skb)
1113 {
1114 struct rt6_info *rt;
1115
1116 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
1117
1118 rt = (struct rt6_info *) skb_dst(skb);
1119 if (rt) {
1120 if (rt->rt6i_flags & RTF_CACHE) {
1121 dst_hold(&rt->dst);
1122 if (ip6_del_rt(rt))
1123 dst_free(&rt->dst);
1124 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
1125 rt->rt6i_node->fn_sernum = -1;
1126 }
1127 }
1128 }
1129
1130 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1131 struct sk_buff *skb, u32 mtu)
1132 {
1133 struct rt6_info *rt6 = (struct rt6_info*)dst;
1134
1135 dst_confirm(dst);
1136 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
1137 struct net *net = dev_net(dst->dev);
1138
1139 rt6->rt6i_flags |= RTF_MODIFIED;
1140 if (mtu < IPV6_MIN_MTU) {
1141 u32 features = dst_metric(dst, RTAX_FEATURES);
1142 mtu = IPV6_MIN_MTU;
1143 features |= RTAX_FEATURE_ALLFRAG;
1144 dst_metric_set(dst, RTAX_FEATURES, features);
1145 }
1146 dst_metric_set(dst, RTAX_MTU, mtu);
1147 rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
1148 }
1149 }
1150
1151 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1152 int oif, u32 mark, kuid_t uid)
1153 {
1154 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1155 struct dst_entry *dst;
1156 struct flowi6 fl6;
1157
1158 memset(&fl6, 0, sizeof(fl6));
1159 fl6.flowi6_oif = oif;
1160 fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
1161 fl6.flowi6_flags = 0;
1162 fl6.daddr = iph->daddr;
1163 fl6.saddr = iph->saddr;
1164 fl6.flowlabel = ip6_flowinfo(iph);
1165 fl6.flowi6_uid = uid;
1166
1167 dst = ip6_route_output(net, NULL, &fl6);
1168 if (!dst->error)
1169 ip6_rt_update_pmtu(dst, NULL, skb, ntohl(mtu));
1170 dst_release(dst);
1171 }
1172 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1173
1174 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1175 {
1176 ip6_update_pmtu(skb, sock_net(sk), mtu,
1177 sk->sk_bound_dev_if, sk->sk_mark, sock_i_uid(sk));
1178 }
1179 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1180
1181 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1182 {
1183 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1184 struct dst_entry *dst;
1185 struct flowi6 fl6;
1186
1187 memset(&fl6, 0, sizeof(fl6));
1188 fl6.flowi6_oif = oif;
1189 fl6.flowi6_mark = mark;
1190 fl6.flowi6_flags = 0;
1191 fl6.daddr = iph->daddr;
1192 fl6.saddr = iph->saddr;
1193 fl6.flowlabel = ip6_flowinfo(iph);
1194
1195 dst = ip6_route_output(net, NULL, &fl6);
1196 if (!dst->error)
1197 rt6_do_redirect(dst, NULL, skb);
1198 dst_release(dst);
1199 }
1200 EXPORT_SYMBOL_GPL(ip6_redirect);
1201
1202 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1203 {
1204 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
1205 }
1206 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
1207
1208 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1209 {
1210 struct net_device *dev = dst->dev;
1211 unsigned int mtu = dst_mtu(dst);
1212 struct net *net = dev_net(dev);
1213
1214 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1215
1216 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1217 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1218
1219 /*
1220 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
1221 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1222 * IPV6_MAXPLEN is also valid and means: "any MSS,
1223 * rely only on pmtu discovery"
1224 */
1225 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1226 mtu = IPV6_MAXPLEN;
1227 return mtu;
1228 }
1229
1230 static unsigned int ip6_mtu(const struct dst_entry *dst)
1231 {
1232 struct inet6_dev *idev;
1233 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
1234
1235 if (mtu)
1236 goto out;
1237
1238 mtu = IPV6_MIN_MTU;
1239
1240 rcu_read_lock();
1241 idev = __in6_dev_get(dst->dev);
1242 if (idev)
1243 mtu = idev->cnf.mtu6;
1244 rcu_read_unlock();
1245
1246 out:
1247 return min_t(unsigned int, mtu, IP6_MAX_MTU);
1248 }
1249
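/*
 * Dst entries used for sending ICMPv6 errors are not inserted into the
 * FIB; they are chained on icmp6_dst_gc_list and reclaimed by
 * icmp6_dst_gc() / icmp6_clean_all() once their refcount drops.
 */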
1250 static struct dst_entry *icmp6_dst_gc_list;
1251 static DEFINE_SPINLOCK(icmp6_dst_lock);
1252
1253 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1254 struct flowi6 *fl6)
1255 {
1256 struct dst_entry *dst;
1257 struct rt6_info *rt;
1258 struct inet6_dev *idev = in6_dev_get(dev);
1259 struct net *net = dev_net(dev);
1260
1261 if (unlikely(!idev))
1262 return ERR_PTR(-ENODEV);
1263
1264 rt = ip6_dst_alloc(net, dev, 0, NULL);
1265 if (unlikely(!rt)) {
1266 in6_dev_put(idev);
1267 dst = ERR_PTR(-ENOMEM);
1268 goto out;
1269 }
1270
1271 rt->dst.flags |= DST_HOST;
1272 rt->dst.output = ip6_output;
1273 atomic_set(&rt->dst.__refcnt, 1);
1274 rt->rt6i_gateway = fl6->daddr;
1275 rt->rt6i_dst.addr = fl6->daddr;
1276 rt->rt6i_dst.plen = 128;
1277 rt->rt6i_idev = idev;
1278 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
1279
1280 spin_lock_bh(&icmp6_dst_lock);
1281 rt->dst.next = icmp6_dst_gc_list;
1282 icmp6_dst_gc_list = &rt->dst;
1283 spin_unlock_bh(&icmp6_dst_lock);
1284
1285 fib6_force_start_gc(net);
1286
1287 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
1288
1289 out:
1290 return dst;
1291 }
1292
1293 int icmp6_dst_gc(void)
1294 {
1295 struct dst_entry *dst, **pprev;
1296 int more = 0;
1297
1298 spin_lock_bh(&icmp6_dst_lock);
1299 pprev = &icmp6_dst_gc_list;
1300
1301 while ((dst = *pprev) != NULL) {
1302 if (!atomic_read(&dst->__refcnt)) {
1303 *pprev = dst->next;
1304 dst_free(dst);
1305 } else {
1306 pprev = &dst->next;
1307 ++more;
1308 }
1309 }
1310
1311 spin_unlock_bh(&icmp6_dst_lock);
1312
1313 return more;
1314 }
1315
1316 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1317 void *arg)
1318 {
1319 struct dst_entry *dst, **pprev;
1320
1321 spin_lock_bh(&icmp6_dst_lock);
1322 pprev = &icmp6_dst_gc_list;
1323 while ((dst = *pprev) != NULL) {
1324 struct rt6_info *rt = (struct rt6_info *) dst;
1325 if (func(rt, arg)) {
1326 *pprev = dst->next;
1327 dst_free(dst);
1328 } else {
1329 pprev = &dst->next;
1330 }
1331 }
1332 spin_unlock_bh(&icmp6_dst_lock);
1333 }
1334
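/*
 * Route cache garbage collection: skip entirely if the last run was
 * recent and we are still under ip6_rt_max_size.  Otherwise run
 * fib6_run_gc() with a timeout (ip6_rt_gc_expire) that grows on every
 * invocation, so GC becomes more aggressive under sustained pressure,
 * and is reset to half the configured gc_timeout once the entry count
 * drops below gc_thresh.  The timeout also decays by expire >>
 * elasticity on each call.
 */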
1335 static int ip6_dst_gc(struct dst_ops *ops)
1336 {
1337 unsigned long now = jiffies;
1338 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1339 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1340 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1341 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1342 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1343 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1344 int entries;
1345
1346 entries = dst_entries_get_fast(ops);
1347 if (time_after(rt_last_gc + rt_min_interval, now) &&
1348 entries <= rt_max_size)
1349 goto out;
1350
1351 net->ipv6.ip6_rt_gc_expire++;
1352 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
1353 net->ipv6.ip6_rt_last_gc = now;
1354 entries = dst_entries_get_slow(ops);
1355 if (entries < ops->gc_thresh)
1356 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1357 out:
1358 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1359 return entries > rt_max_size;
1360 }
1361
1362 int ip6_dst_hoplimit(struct dst_entry *dst)
1363 {
1364 int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
1365 if (hoplimit == 0) {
1366 struct net_device *dev = dst->dev;
1367 struct inet6_dev *idev;
1368
1369 rcu_read_lock();
1370 idev = __in6_dev_get(dev);
1371 if (idev)
1372 hoplimit = idev->cnf.hop_limit;
1373 else
1374 hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
1375 rcu_read_unlock();
1376 }
1377 return hoplimit;
1378 }
1379 EXPORT_SYMBOL(ip6_dst_hoplimit);
1380
1381 /*
1382  *	Add a route described by a fib6_config (netlink, ioctl, addrconf).
1383  */
1384
1385 int ip6_route_add(struct fib6_config *cfg)
1386 {
1387 int err;
1388 struct net *net = cfg->fc_nlinfo.nl_net;
1389 struct rt6_info *rt = NULL;
1390 struct net_device *dev = NULL;
1391 struct inet6_dev *idev = NULL;
1392 struct fib6_table *table;
1393 int addr_type;
1394
1395 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1396 return -EINVAL;
1397 #ifndef CONFIG_IPV6_SUBTREES
1398 if (cfg->fc_src_len)
1399 return -EINVAL;
1400 #endif
1401 if (cfg->fc_ifindex) {
1402 err = -ENODEV;
1403 dev = dev_get_by_index(net, cfg->fc_ifindex);
1404 if (!dev)
1405 goto out;
1406 idev = in6_dev_get(dev);
1407 if (!idev)
1408 goto out;
1409 }
1410
1411 if (cfg->fc_metric == 0)
1412 cfg->fc_metric = IP6_RT_PRIO_USER;
1413
1414 err = -ENOBUFS;
1415 if (cfg->fc_nlinfo.nlh &&
1416 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1417 table = fib6_get_table(net, cfg->fc_table);
1418 if (!table) {
1419 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1420 table = fib6_new_table(net, cfg->fc_table);
1421 }
1422 } else {
1423 table = fib6_new_table(net, cfg->fc_table);
1424 }
1425
1426 if (!table)
1427 goto out;
1428
1429 rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
1430
1431 if (!rt) {
1432 err = -ENOMEM;
1433 goto out;
1434 }
1435
1436 if (cfg->fc_flags & RTF_EXPIRES)
1437 rt6_set_expires(rt, jiffies +
1438 clock_t_to_jiffies(cfg->fc_expires));
1439 else
1440 rt6_clean_expires(rt);
1441
1442 if (cfg->fc_protocol == RTPROT_UNSPEC)
1443 cfg->fc_protocol = RTPROT_BOOT;
1444 rt->rt6i_protocol = cfg->fc_protocol;
1445
1446 addr_type = ipv6_addr_type(&cfg->fc_dst);
1447
1448 if (addr_type & IPV6_ADDR_MULTICAST)
1449 rt->dst.input = ip6_mc_input;
1450 else if (cfg->fc_flags & RTF_LOCAL)
1451 rt->dst.input = ip6_input;
1452 else
1453 rt->dst.input = ip6_forward;
1454
1455 rt->dst.output = ip6_output;
1456
1457 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1458 rt->rt6i_dst.plen = cfg->fc_dst_len;
1459 if (rt->rt6i_dst.plen == 128)
1460 rt->dst.flags |= DST_HOST;
1461
1462 if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
1463 u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1464 if (!metrics) {
1465 err = -ENOMEM;
1466 goto out;
1467 }
1468 dst_init_metrics(&rt->dst, metrics, 0);
1469 }
1470 #ifdef CONFIG_IPV6_SUBTREES
1471 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1472 rt->rt6i_src.plen = cfg->fc_src_len;
1473 #endif
1474
1475 rt->rt6i_metric = cfg->fc_metric;
1476
1477 	/* We cannot add true routes via loopback here;
1478 	   they would result in kernel looping. Promote them to reject routes.
1479 	 */
1480 if ((cfg->fc_flags & RTF_REJECT) ||
1481 (dev && (dev->flags & IFF_LOOPBACK) &&
1482 !(addr_type & IPV6_ADDR_LOOPBACK) &&
1483 !(cfg->fc_flags & RTF_LOCAL))) {
1484 /* hold loopback dev/idev if we haven't done so. */
1485 if (dev != net->loopback_dev) {
1486 if (dev) {
1487 dev_put(dev);
1488 in6_dev_put(idev);
1489 }
1490 dev = net->loopback_dev;
1491 dev_hold(dev);
1492 idev = in6_dev_get(dev);
1493 if (!idev) {
1494 err = -ENODEV;
1495 goto out;
1496 }
1497 }
1498 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1499 switch (cfg->fc_type) {
1500 case RTN_BLACKHOLE:
1501 rt->dst.error = -EINVAL;
1502 rt->dst.output = dst_discard;
1503 rt->dst.input = dst_discard;
1504 break;
1505 case RTN_PROHIBIT:
1506 rt->dst.error = -EACCES;
1507 rt->dst.output = ip6_pkt_prohibit_out;
1508 rt->dst.input = ip6_pkt_prohibit;
1509 break;
1510 case RTN_THROW:
1511 default:
1512 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1513 : -ENETUNREACH;
1514 rt->dst.output = ip6_pkt_discard_out;
1515 rt->dst.input = ip6_pkt_discard;
1516 break;
1517 }
1518 goto install_route;
1519 }
1520
1521 if (cfg->fc_flags & RTF_GATEWAY) {
1522 const struct in6_addr *gw_addr;
1523 int gwa_type;
1524
1525 gw_addr = &cfg->fc_gateway;
1526 rt->rt6i_gateway = *gw_addr;
1527 gwa_type = ipv6_addr_type(gw_addr);
1528
1529 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1530 struct rt6_info *grt;
1531
1532 			/* IPv6 strictly prohibits using non-link-local
1533 			   addresses as nexthop addresses.
1534 			   Otherwise, the router will not be able to send redirects.
1535 			   This is a good rule, but in some (rare!) circumstances
1536 			   (SIT, PtP, NBMA NOARP links) it is handy to allow
1537 			   some exceptions. --ANK
1538 			 */
1539 err = -EINVAL;
1540 if (!(gwa_type & IPV6_ADDR_UNICAST))
1541 goto out;
1542
1543 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1544
1545 err = -EHOSTUNREACH;
1546 if (!grt)
1547 goto out;
1548 if (dev) {
1549 if (dev != grt->dst.dev) {
1550 ip6_rt_put(grt);
1551 goto out;
1552 }
1553 } else {
1554 dev = grt->dst.dev;
1555 idev = grt->rt6i_idev;
1556 dev_hold(dev);
1557 in6_dev_hold(grt->rt6i_idev);
1558 }
1559 if (!(grt->rt6i_flags & RTF_GATEWAY))
1560 err = 0;
1561 ip6_rt_put(grt);
1562
1563 if (err)
1564 goto out;
1565 }
1566 err = -EINVAL;
1567 if (!dev || (dev->flags & IFF_LOOPBACK))
1568 goto out;
1569 }
1570
1571 err = -ENODEV;
1572 if (!dev)
1573 goto out;
1574
1575 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1576 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1577 err = -EINVAL;
1578 goto out;
1579 }
1580 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
1581 rt->rt6i_prefsrc.plen = 128;
1582 } else
1583 rt->rt6i_prefsrc.plen = 0;
1584
1585 rt->rt6i_flags = cfg->fc_flags;
1586
1587 install_route:
1588 if (cfg->fc_mx) {
1589 struct nlattr *nla;
1590 int remaining;
1591
1592 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1593 int type = nla_type(nla);
1594
1595 if (type) {
1596 if (type > RTAX_MAX) {
1597 err = -EINVAL;
1598 goto out;
1599 }
1600
1601 dst_metric_set(&rt->dst, type, nla_get_u32(nla));
1602 }
1603 }
1604 }
1605
1606 rt->dst.dev = dev;
1607 rt->rt6i_idev = idev;
1608 rt->rt6i_table = table;
1609
1610 cfg->fc_nlinfo.nl_net = dev_net(dev);
1611
1612 return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1613
1614 out:
1615 if (dev)
1616 dev_put(dev);
1617 if (idev)
1618 in6_dev_put(idev);
1619 if (rt)
1620 dst_free(&rt->dst);
1621 return err;
1622 }
1623
1624 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1625 {
1626 int err;
1627 struct fib6_table *table;
1628 struct net *net = dev_net(rt->dst.dev);
1629
1630 if (rt == net->ipv6.ip6_null_entry) {
1631 err = -ENOENT;
1632 goto out;
1633 }
1634
1635 table = rt->rt6i_table;
1636 write_lock_bh(&table->tb6_lock);
1637 err = fib6_del(rt, info);
1638 write_unlock_bh(&table->tb6_lock);
1639
1640 out:
1641 ip6_rt_put(rt);
1642 return err;
1643 }
1644
1645 int ip6_del_rt(struct rt6_info *rt)
1646 {
1647 struct nl_info info = {
1648 .nl_net = dev_net(rt->dst.dev),
1649 };
1650 return __ip6_del_rt(rt, &info);
1651 }
1652
1653 static int ip6_route_del(struct fib6_config *cfg)
1654 {
1655 struct fib6_table *table;
1656 struct fib6_node *fn;
1657 struct rt6_info *rt;
1658 int err = -ESRCH;
1659
1660 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1661 if (!table)
1662 return err;
1663
1664 read_lock_bh(&table->tb6_lock);
1665
1666 fn = fib6_locate(&table->tb6_root,
1667 &cfg->fc_dst, cfg->fc_dst_len,
1668 &cfg->fc_src, cfg->fc_src_len);
1669
1670 if (fn) {
1671 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1672 if (cfg->fc_ifindex &&
1673 (!rt->dst.dev ||
1674 rt->dst.dev->ifindex != cfg->fc_ifindex))
1675 continue;
1676 if (cfg->fc_flags & RTF_GATEWAY &&
1677 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1678 continue;
1679 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1680 continue;
1681 dst_hold(&rt->dst);
1682 read_unlock_bh(&table->tb6_lock);
1683
1684 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1685 }
1686 }
1687 read_unlock_bh(&table->tb6_lock);
1688
1689 return err;
1690 }
1691
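/*
 * Handle an ICMPv6 Redirect: validate the message (destination not
 * multicast; target link-local unicast unless it equals the destination,
 * i.e. an on-link redirect; valid ND options), update the neighbour
 * cache for the new first hop, and install an RTF_CACHE|RTF_DYNAMIC
 * host route cloned from the current dst.  A previous RTF_CACHE route
 * for the destination is removed.
 */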
1692 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
1693 {
1694 struct net *net = dev_net(skb->dev);
1695 struct netevent_redirect netevent;
1696 struct rt6_info *rt, *nrt = NULL;
1697 struct ndisc_options ndopts;
1698 struct inet6_dev *in6_dev;
1699 struct neighbour *neigh;
1700 struct rd_msg *msg;
1701 int optlen, on_link;
1702 u8 *lladdr;
1703
1704 optlen = skb->tail - skb->transport_header;
1705 optlen -= sizeof(*msg);
1706
1707 if (optlen < 0) {
1708 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
1709 return;
1710 }
1711
1712 msg = (struct rd_msg *)icmp6_hdr(skb);
1713
1714 if (ipv6_addr_is_multicast(&msg->dest)) {
1715 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
1716 return;
1717 }
1718
1719 on_link = 0;
1720 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
1721 on_link = 1;
1722 } else if (ipv6_addr_type(&msg->target) !=
1723 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
1724 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
1725 return;
1726 }
1727
1728 in6_dev = __in6_dev_get(skb->dev);
1729 if (!in6_dev)
1730 return;
1731 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
1732 return;
1733
1734 /* RFC2461 8.1:
1735 * The IP source address of the Redirect MUST be the same as the current
1736 * first-hop router for the specified ICMP Destination Address.
1737 */
1738
1739 if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) {
1740 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
1741 return;
1742 }
1743
1744 lladdr = NULL;
1745 if (ndopts.nd_opts_tgt_lladdr) {
1746 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
1747 skb->dev);
1748 if (!lladdr) {
1749 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
1750 return;
1751 }
1752 }
1753
1754 rt = (struct rt6_info *) dst;
1755 if (rt == net->ipv6.ip6_null_entry) {
1756 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
1757 return;
1758 }
1759
1760 	/* Redirect received -> path was valid.
1761 	 * Redirects are sent only in response to data packets,
1762 	 * so this nexthop is apparently reachable. --ANK
1763 	 */
1764 dst_confirm(&rt->dst);
1765
1766 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
1767 if (!neigh)
1768 return;
1769
1770 /*
1771 * We have finally decided to accept it.
1772 */
1773
1774 neigh_update(neigh, lladdr, NUD_STALE,
1775 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1776 NEIGH_UPDATE_F_OVERRIDE|
1777 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1778 NEIGH_UPDATE_F_ISROUTER))
1779 );
1780
1781 nrt = ip6_rt_copy(rt, &msg->dest);
1782 if (!nrt)
1783 goto out;
1784
1785 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1786 if (on_link)
1787 nrt->rt6i_flags &= ~RTF_GATEWAY;
1788
1789 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
1790
1791 if (ip6_ins_rt(nrt))
1792 goto out;
1793
1794 netevent.old = &rt->dst;
1795 netevent.new = &nrt->dst;
1796 netevent.daddr = &msg->dest;
1797 netevent.neigh = neigh;
1798 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1799
1800 if (rt->rt6i_flags & RTF_CACHE) {
1801 rt = (struct rt6_info *) dst_clone(&rt->dst);
1802 ip6_del_rt(rt);
1803 }
1804
1805 out:
1806 neigh_release(neigh);
1807 }
1808
1809 /*
1810 * Misc support functions
1811 */
1812
1813 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1814 const struct in6_addr *dest)
1815 {
1816 struct net *net = dev_net(ort->dst.dev);
1817 struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
1818 ort->rt6i_table);
1819
1820 if (rt) {
1821 rt->dst.input = ort->dst.input;
1822 rt->dst.output = ort->dst.output;
1823 rt->dst.flags |= DST_HOST;
1824
1825 rt->rt6i_dst.addr = *dest;
1826 rt->rt6i_dst.plen = 128;
1827 dst_copy_metrics(&rt->dst, &ort->dst);
1828 rt->dst.error = ort->dst.error;
1829 rt->rt6i_idev = ort->rt6i_idev;
1830 if (rt->rt6i_idev)
1831 in6_dev_hold(rt->rt6i_idev);
1832 rt->dst.lastuse = jiffies;
1833
1834 if (ort->rt6i_flags & RTF_GATEWAY)
1835 rt->rt6i_gateway = ort->rt6i_gateway;
1836 else
1837 rt->rt6i_gateway = *dest;
1838 rt->rt6i_flags = ort->rt6i_flags;
1839 rt6_set_from(rt, ort);
1840 rt->rt6i_metric = 0;
1841
1842 #ifdef CONFIG_IPV6_SUBTREES
1843 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1844 #endif
1845 memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
1846 rt->rt6i_table = ort->rt6i_table;
1847 }
1848 return rt;
1849 }
1850
1851 #ifdef CONFIG_IPV6_ROUTE_INFO
1852 static struct rt6_info *rt6_get_route_info(struct net_device *dev,
1853 const struct in6_addr *prefix, int prefixlen,
1854 const struct in6_addr *gwaddr)
1855 {
1856 struct fib6_node *fn;
1857 struct rt6_info *rt = NULL;
1858 struct fib6_table *table;
1859
1860 table = fib6_get_table(dev_net(dev),
1861 addrconf_rt_table(dev, RT6_TABLE_INFO));
1862 if (!table)
1863 return NULL;
1864
1865 read_lock_bh(&table->tb6_lock);
1866 fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
1867 if (!fn)
1868 goto out;
1869
1870 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1871 if (rt->dst.dev->ifindex != dev->ifindex)
1872 continue;
1873 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1874 continue;
1875 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1876 continue;
1877 dst_hold(&rt->dst);
1878 break;
1879 }
1880 out:
1881 read_unlock_bh(&table->tb6_lock);
1882 return rt;
1883 }
1884
1885 static struct rt6_info *rt6_add_route_info(struct net_device *dev,
1886 const struct in6_addr *prefix, int prefixlen,
1887 const struct in6_addr *gwaddr, unsigned int pref)
1888 {
1889 struct fib6_config cfg = {
1890 .fc_table = addrconf_rt_table(dev, RT6_TABLE_INFO),
1891 .fc_metric = IP6_RT_PRIO_USER,
1892 .fc_ifindex = dev->ifindex,
1893 .fc_dst_len = prefixlen,
1894 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1895 RTF_UP | RTF_PREF(pref),
1896 .fc_nlinfo.portid = 0,
1897 .fc_nlinfo.nlh = NULL,
1898 .fc_nlinfo.nl_net = dev_net(dev),
1899 };
1900
1901 cfg.fc_dst = *prefix;
1902 cfg.fc_gateway = *gwaddr;
1903
1904 /* We should treat it as a default route if prefix length is 0. */
1905 if (!prefixlen)
1906 cfg.fc_flags |= RTF_DEFAULT;
1907
1908 ip6_route_add(&cfg);
1909
1910 return rt6_get_route_info(dev, prefix, prefixlen, gwaddr);
1911 }
1912 #endif
1913
1914 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
1915 {
1916 struct rt6_info *rt;
1917 struct fib6_table *table;
1918
1919 table = fib6_get_table(dev_net(dev),
1920 addrconf_rt_table(dev, RT6_TABLE_MAIN));
1921 if (!table)
1922 return NULL;
1923
1924 read_lock_bh(&table->tb6_lock);
1925 for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
1926 if (dev == rt->dst.dev &&
1927 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1928 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1929 break;
1930 }
1931 if (rt)
1932 dst_hold(&rt->dst);
1933 read_unlock_bh(&table->tb6_lock);
1934 return rt;
1935 }
1936
1937 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
1938 struct net_device *dev,
1939 unsigned int pref)
1940 {
1941 struct fib6_config cfg = {
1942 .fc_table = addrconf_rt_table(dev, RT6_TABLE_DFLT),
1943 .fc_metric = IP6_RT_PRIO_USER,
1944 .fc_ifindex = dev->ifindex,
1945 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1946 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1947 .fc_nlinfo.portid = 0,
1948 .fc_nlinfo.nlh = NULL,
1949 .fc_nlinfo.nl_net = dev_net(dev),
1950 };
1951
1952 cfg.fc_gateway = *gwaddr;
1953
1954 ip6_route_add(&cfg);
1955
1956 return rt6_get_dflt_router(gwaddr, dev);
1957 }
1958
1959
1960 int rt6_addrconf_purge(struct rt6_info *rt, void *arg) {
1961 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
1962 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2))
1963 return -1;
1964 return 0;
1965 }
1966
1967 void rt6_purge_dflt_routers(struct net *net)
1968 {
1969 fib6_clean_all(net, rt6_addrconf_purge, 0, NULL);
1970 }
1971
1972 static void rtmsg_to_fib6_config(struct net *net,
1973 struct in6_rtmsg *rtmsg,
1974 struct fib6_config *cfg)
1975 {
1976 memset(cfg, 0, sizeof(*cfg));
1977
1978 cfg->fc_table = RT6_TABLE_MAIN;
1979 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
1980 cfg->fc_metric = rtmsg->rtmsg_metric;
1981 cfg->fc_expires = rtmsg->rtmsg_info;
1982 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
1983 cfg->fc_src_len = rtmsg->rtmsg_src_len;
1984 cfg->fc_flags = rtmsg->rtmsg_flags;
1985
1986 cfg->fc_nlinfo.nl_net = net;
1987
1988 cfg->fc_dst = rtmsg->rtmsg_dst;
1989 cfg->fc_src = rtmsg->rtmsg_src;
1990 cfg->fc_gateway = rtmsg->rtmsg_gateway;
1991 }
1992
1993 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1994 {
1995 struct fib6_config cfg;
1996 struct in6_rtmsg rtmsg;
1997 int err;
1998
1999 switch(cmd) {
2000 case SIOCADDRT: /* Add a route */
2001 case SIOCDELRT: /* Delete a route */
2002 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2003 return -EPERM;
2004 err = copy_from_user(&rtmsg, arg,
2005 sizeof(struct in6_rtmsg));
2006 if (err)
2007 return -EFAULT;
2008
2009 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
2010
2011 rtnl_lock();
2012 switch (cmd) {
2013 case SIOCADDRT:
2014 err = ip6_route_add(&cfg);
2015 break;
2016 case SIOCDELRT:
2017 err = ip6_route_del(&cfg);
2018 break;
2019 default:
2020 err = -EINVAL;
2021 }
2022 rtnl_unlock();
2023
2024 return err;
2025 }
2026
2027 return -EINVAL;
2028 }
2029
2030 /*
2031 * Drop the packet on the floor
2032 */
2033
2034 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
2035 {
2036 int type;
2037 struct dst_entry *dst = skb_dst(skb);
2038 switch (ipstats_mib_noroutes) {
2039 case IPSTATS_MIB_INNOROUTES:
2040 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2041 if (type == IPV6_ADDR_ANY) {
2042 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2043 IPSTATS_MIB_INADDRERRORS);
2044 break;
2045 }
2046 /* FALLTHROUGH */
2047 case IPSTATS_MIB_OUTNOROUTES:
2048 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2049 ipstats_mib_noroutes);
2050 break;
2051 }
2052 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2053 kfree_skb(skb);
2054 return 0;
2055 }
2056
2057 static int ip6_pkt_discard(struct sk_buff *skb)
2058 {
2059 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2060 }
2061
2062 static int ip6_pkt_discard_out(struct sk_buff *skb)
2063 {
2064 skb->dev = skb_dst(skb)->dev;
2065 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2066 }
2067
2068 static int ip6_pkt_prohibit(struct sk_buff *skb)
2069 {
2070 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2071 }
2072
2073 static int ip6_pkt_prohibit_out(struct sk_buff *skb)
2074 {
2075 skb->dev = skb_dst(skb)->dev;
2076 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2077 }
2078
2079 /*
2080 * Allocate a dst for local (unicast / anycast) address.
2081 */
2082
2083 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2084 const struct in6_addr *addr,
2085 bool anycast)
2086 {
2087 struct net *net = dev_net(idev->dev);
2088 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
2089 DST_NOCOUNT, NULL);
2090 if (!rt)
2091 return ERR_PTR(-ENOMEM);
2092
2093 in6_dev_hold(idev);
2094
2095 rt->dst.flags |= DST_HOST;
2096 rt->dst.input = ip6_input;
2097 rt->dst.output = ip6_output;
2098 rt->rt6i_idev = idev;
2099
2100 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2101 if (anycast)
2102 rt->rt6i_flags |= RTF_ANYCAST;
2103 else
2104 rt->rt6i_flags |= RTF_LOCAL;
2105
2106 rt->rt6i_gateway = *addr;
2107 rt->rt6i_dst.addr = *addr;
2108 rt->rt6i_dst.plen = 128;
2109 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
2110
2111 atomic_set(&rt->dst.__refcnt, 1);
2112
2113 return rt;
2114 }
2115
2116 int ip6_route_get_saddr(struct net *net,
2117 struct rt6_info *rt,
2118 const struct in6_addr *daddr,
2119 unsigned int prefs,
2120 struct in6_addr *saddr)
2121 {
2122 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
2123 int err = 0;
2124 if (rt->rt6i_prefsrc.plen)
2125 *saddr = rt->rt6i_prefsrc.addr;
2126 else
2127 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2128 daddr, prefs, saddr);
2129 return err;
2130 }
2131
2132 /* remove deleted ip from prefsrc entries */
2133 struct arg_dev_net_ip {
2134 struct net_device *dev;
2135 struct net *net;
2136 struct in6_addr *addr;
2137 };
2138
2139 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2140 {
2141 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2142 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2143 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2144
2145 if (((void *)rt->dst.dev == dev || !dev) &&
2146 rt != net->ipv6.ip6_null_entry &&
2147 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2148 /* remove prefsrc entry */
2149 rt->rt6i_prefsrc.plen = 0;
2150 }
2151 return 0;
2152 }
2153
2154 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2155 {
2156 struct net *net = dev_net(ifp->idev->dev);
2157 struct arg_dev_net_ip adni = {
2158 .dev = ifp->idev->dev,
2159 .net = net,
2160 .addr = &ifp->addr,
2161 };
2162 fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
2163 }
2164
2165 struct arg_dev_net {
2166 struct net_device *dev;
2167 struct net *net;
2168 };
2169
2170 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2171 {
2172 const struct arg_dev_net *adn = arg;
2173 const struct net_device *dev = adn->dev;
2174
2175 if ((rt->dst.dev == dev || !dev) &&
2176 rt != adn->net->ipv6.ip6_null_entry)
2177 return -1;
2178
2179 return 0;
2180 }
2181
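/* Flush routes that go through @dev when it goes down (a NULL @dev flushes
 * everything except the null entry); fib6_ifdown() returning -1 asks the
 * FIB walker to delete the matching entry.
 */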
2182 void rt6_ifdown(struct net *net, struct net_device *dev)
2183 {
2184 struct arg_dev_net adn = {
2185 .dev = dev,
2186 .net = net,
2187 };
2188
2189 fib6_clean_all(net, fib6_ifdown, 0, &adn);
2190 icmp6_clean_all(fib6_ifdown, &adn);
2191 }
2192
2193 struct rt6_mtu_change_arg {
2194 struct net_device *dev;
2195 unsigned int mtu;
2196 };
2197
2198 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2199 {
2200 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2201 struct inet6_dev *idev;
2202
2203 /* In IPv6 PMTU discovery is not optional,
2204 so the RTAX_MTU lock cannot disable it.
2205 We still use this lock to block changes
2206 caused by addrconf/ndisc.
2207 */
2208
2209 idev = __in6_dev_get(arg->dev);
2210 if (!idev)
2211 return 0;
2212
2213 /* For an administrative MTU increase, there is no way to discover
2214 an IPv6 PMTU increase, so the PMTU should be updated here.
2215 Since RFC 1981 doesn't cover administrative MTU increases,
2216 updating the PMTU on increase is a MUST. (i.e. jumbo frames)
2217 */
2218 /*
2219 If the new MTU is less than the route PMTU, this new MTU will be the
2220 lowest MTU in the path; update the route PMTU to reflect the PMTU
2221 decrease. If the new MTU is greater than the route PMTU, and the
2222 old MTU was the lowest MTU in the path, update the route PMTU
2223 to reflect the increase. In that case, if another node's MTU is now
2224 the lowest in the path, a Packet Too Big message will trigger
2225 PMTU discovery again.
2226 */
2227 if (rt->dst.dev == arg->dev &&
2228 !dst_metric_locked(&rt->dst, RTAX_MTU) &&
2229 (dst_mtu(&rt->dst) >= arg->mtu ||
2230 (dst_mtu(&rt->dst) < arg->mtu &&
2231 dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
2232 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2233 }
2234 return 0;
2235 }
2236
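/* Called when the MTU of @dev changes: walk all routing tables and apply
 * the PMTU update policy described in rt6_mtu_change_route() above.
 */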
2237 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2238 {
2239 struct rt6_mtu_change_arg arg = {
2240 .dev = dev,
2241 .mtu = mtu,
2242 };
2243
2244 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
2245 }
2246
2247 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2248 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2249 [RTA_OIF] = { .type = NLA_U32 },
2250 [RTA_IIF] = { .type = NLA_U32 },
2251 [RTA_PRIORITY] = { .type = NLA_U32 },
2252 [RTA_METRICS] = { .type = NLA_NESTED },
2253 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
2254 [RTA_UID] = { .type = NLA_U32 },
2255 };
2256
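/* Translate an RTM_NEWROUTE/RTM_DELROUTE request into a fib6_config.
 * For illustration (iproute2 userspace, not part of this file):
 *   ip -6 route add 2001:db8::/64 via fe80::1 dev eth0 metric 1024
 * arrives as an rtmsg plus RTA_DST, RTA_GATEWAY, RTA_OIF and RTA_PRIORITY
 * attributes, which are copied into @cfg below.
 */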
2257 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2258 struct fib6_config *cfg)
2259 {
2260 struct rtmsg *rtm;
2261 struct nlattr *tb[RTA_MAX+1];
2262 int err;
2263
2264 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2265 if (err < 0)
2266 goto errout;
2267
2268 err = -EINVAL;
2269 rtm = nlmsg_data(nlh);
2270 memset(cfg, 0, sizeof(*cfg));
2271
2272 cfg->fc_table = rtm->rtm_table;
2273 cfg->fc_dst_len = rtm->rtm_dst_len;
2274 cfg->fc_src_len = rtm->rtm_src_len;
2275 cfg->fc_flags = RTF_UP;
2276 cfg->fc_protocol = rtm->rtm_protocol;
2277 cfg->fc_type = rtm->rtm_type;
2278
2279 if (rtm->rtm_type == RTN_UNREACHABLE ||
2280 rtm->rtm_type == RTN_BLACKHOLE ||
2281 rtm->rtm_type == RTN_PROHIBIT ||
2282 rtm->rtm_type == RTN_THROW)
2283 cfg->fc_flags |= RTF_REJECT;
2284
2285 if (rtm->rtm_type == RTN_LOCAL)
2286 cfg->fc_flags |= RTF_LOCAL;
2287
2288 cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
2289 cfg->fc_nlinfo.nlh = nlh;
2290 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2291
2292 if (tb[RTA_GATEWAY]) {
2293 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
2294 cfg->fc_flags |= RTF_GATEWAY;
2295 }
2296
2297 if (tb[RTA_DST]) {
2298 int plen = (rtm->rtm_dst_len + 7) >> 3;
2299
2300 if (nla_len(tb[RTA_DST]) < plen)
2301 goto errout;
2302
2303 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2304 }
2305
2306 if (tb[RTA_SRC]) {
2307 int plen = (rtm->rtm_src_len + 7) >> 3;
2308
2309 if (nla_len(tb[RTA_SRC]) < plen)
2310 goto errout;
2311
2312 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2313 }
2314
2315 if (tb[RTA_PREFSRC])
2316 nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
2317
2318 if (tb[RTA_OIF])
2319 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2320
2321 if (tb[RTA_PRIORITY])
2322 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2323
2324 if (tb[RTA_METRICS]) {
2325 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2326 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2327 }
2328
2329 if (tb[RTA_TABLE])
2330 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2331
2332 if (tb[RTA_MULTIPATH]) {
2333 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2334 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2335 }
2336
2337 err = 0;
2338 errout:
2339 return err;
2340 }
2341
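/* Add or delete one route per nexthop of an RTA_MULTIPATH request, e.g.
 * (illustrative userspace command, assuming iproute2 syntax):
 *   ip -6 route add 2001:db8::/64 nexthop via fe80::1 dev eth0 \
 *                                 nexthop via fe80::2 dev eth1
 * Each rtnexthop is applied as an individual route; when an add fails the
 * loop restarts in delete mode to roll back the nexthops added so far.
 */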
2342 static int ip6_route_multipath(struct fib6_config *cfg, int add)
2343 {
2344 struct fib6_config r_cfg;
2345 struct rtnexthop *rtnh;
2346 int remaining;
2347 int attrlen;
2348 int err = 0, last_err = 0;
2349
2350 beginning:
2351 rtnh = (struct rtnexthop *)cfg->fc_mp;
2352 remaining = cfg->fc_mp_len;
2353
2354 /* Parse a Multipath Entry */
2355 while (rtnh_ok(rtnh, remaining)) {
2356 memcpy(&r_cfg, cfg, sizeof(*cfg));
2357 if (rtnh->rtnh_ifindex)
2358 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
2359
2360 attrlen = rtnh_attrlen(rtnh);
2361 if (attrlen > 0) {
2362 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
2363
2364 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2365 if (nla) {
2366 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
2367 r_cfg.fc_flags |= RTF_GATEWAY;
2368 }
2369 }
2370 err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
2371 if (err) {
2372 last_err = err;
2373 /* If we are trying to remove a route, do not stop the
2374 * loop when ip6_route_del() fails (because the next hop is
2375 * already gone); we should try to remove all next hops.
2376 */
2377 if (add) {
2378 /* If add fails, we should try to delete all
2379 * next hops that have been already added.
2380 */
2381 add = 0;
2382 goto beginning;
2383 }
2384 }
2385 /* Because each route is added like a single route, we remove
2386 * this flag after the first nexthop (if there is a collision,
2387 * we have already failed to add the first nexthop:
2388 * fib6_add_rt2node() has rejected it).
2389 */
2390 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL;
2391 rtnh = rtnh_next(rtnh, &remaining);
2392 }
2393
2394 return last_err;
2395 }
2396
2397 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh)
2398 {
2399 struct fib6_config cfg;
2400 int err;
2401
2402 err = rtm_to_fib6_config(skb, nlh, &cfg);
2403 if (err < 0)
2404 return err;
2405
2406 if (cfg.fc_mp)
2407 return ip6_route_multipath(&cfg, 0);
2408 else
2409 return ip6_route_del(&cfg);
2410 }
2411
2412 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh)
2413 {
2414 struct fib6_config cfg;
2415 int err;
2416
2417 err = rtm_to_fib6_config(skb, nlh, &cfg);
2418 if (err < 0)
2419 return err;
2420
2421 if (cfg.fc_mp)
2422 return ip6_route_multipath(&cfg, 1);
2423 else
2424 return ip6_route_add(&cfg);
2425 }
2426
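/* Worst-case length of a single route netlink message; used to size the
 * skb allocated in inet6_rt_notify().
 */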
2427 static inline size_t rt6_nlmsg_size(void)
2428 {
2429 return NLMSG_ALIGN(sizeof(struct rtmsg))
2430 + nla_total_size(16) /* RTA_SRC */
2431 + nla_total_size(16) /* RTA_DST */
2432 + nla_total_size(16) /* RTA_GATEWAY */
2433 + nla_total_size(16) /* RTA_PREFSRC */
2434 + nla_total_size(4) /* RTA_TABLE */
2435 + nla_total_size(4) /* RTA_IIF */
2436 + nla_total_size(4) /* RTA_OIF */
2437 + nla_total_size(4) /* RTA_PRIORITY */
2438 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2439 + nla_total_size(sizeof(struct rta_cacheinfo));
2440 }
2441
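/* Fill one RTM_* message describing @rt into @skb. Returns the final
 * message length on success, 1 when a prefix-only dump skips a non-prefix
 * route, or -EMSGSIZE when the skb runs out of room.
 */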
2442 static int rt6_fill_node(struct net *net,
2443 struct sk_buff *skb, struct rt6_info *rt,
2444 struct in6_addr *dst, struct in6_addr *src,
2445 int iif, int type, u32 portid, u32 seq,
2446 int prefix, int nowait, unsigned int flags)
2447 {
2448 struct rtmsg *rtm;
2449 struct nlmsghdr *nlh;
2450 long expires;
2451 u32 table;
2452
2453 if (prefix) { /* user wants prefix routes only */
2454 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2455 /* success since this is not a prefix route */
2456 return 1;
2457 }
2458 }
2459
2460 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
2461 if (!nlh)
2462 return -EMSGSIZE;
2463
2464 rtm = nlmsg_data(nlh);
2465 rtm->rtm_family = AF_INET6;
2466 rtm->rtm_dst_len = rt->rt6i_dst.plen;
2467 rtm->rtm_src_len = rt->rt6i_src.plen;
2468 rtm->rtm_tos = 0;
2469 if (rt->rt6i_table)
2470 table = rt->rt6i_table->tb6_id;
2471 else
2472 table = RT6_TABLE_UNSPEC;
2473 rtm->rtm_table = table;
2474 if (nla_put_u32(skb, RTA_TABLE, table))
2475 goto nla_put_failure;
2476 if (rt->rt6i_flags & RTF_REJECT) {
2477 switch (rt->dst.error) {
2478 case -EINVAL:
2479 rtm->rtm_type = RTN_BLACKHOLE;
2480 break;
2481 case -EACCES:
2482 rtm->rtm_type = RTN_PROHIBIT;
2483 break;
2484 case -EAGAIN:
2485 rtm->rtm_type = RTN_THROW;
2486 break;
2487 default:
2488 rtm->rtm_type = RTN_UNREACHABLE;
2489 break;
2490 }
2491 }
2492 else if (rt->rt6i_flags & RTF_LOCAL)
2493 rtm->rtm_type = RTN_LOCAL;
2494 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
2495 rtm->rtm_type = RTN_LOCAL;
2496 else
2497 rtm->rtm_type = RTN_UNICAST;
2498 rtm->rtm_flags = 0;
2499 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2500 rtm->rtm_protocol = rt->rt6i_protocol;
2501 if (rt->rt6i_flags & RTF_DYNAMIC)
2502 rtm->rtm_protocol = RTPROT_REDIRECT;
2503 else if (rt->rt6i_flags & RTF_ADDRCONF) {
2504 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
2505 rtm->rtm_protocol = RTPROT_RA;
2506 else
2507 rtm->rtm_protocol = RTPROT_KERNEL;
2508 }
2509
2510 if (rt->rt6i_flags & RTF_CACHE)
2511 rtm->rtm_flags |= RTM_F_CLONED;
2512
2513 if (dst) {
2514 if (nla_put(skb, RTA_DST, 16, dst))
2515 goto nla_put_failure;
2516 rtm->rtm_dst_len = 128;
2517 } else if (rtm->rtm_dst_len)
2518 if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr))
2519 goto nla_put_failure;
2520 #ifdef CONFIG_IPV6_SUBTREES
2521 if (src) {
2522 if (nla_put(skb, RTA_SRC, 16, src))
2523 goto nla_put_failure;
2524 rtm->rtm_src_len = 128;
2525 } else if (rtm->rtm_src_len &&
2526 nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr))
2527 goto nla_put_failure;
2528 #endif
2529 if (iif) {
2530 #ifdef CONFIG_IPV6_MROUTE
2531 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2532 int err = ip6mr_get_route(net, skb, rtm, nowait);
2533 if (err <= 0) {
2534 if (!nowait) {
2535 if (err == 0)
2536 return 0;
2537 goto nla_put_failure;
2538 } else {
2539 if (err == -EMSGSIZE)
2540 goto nla_put_failure;
2541 }
2542 }
2543 } else
2544 #endif
2545 if (nla_put_u32(skb, RTA_IIF, iif))
2546 goto nla_put_failure;
2547 } else if (dst) {
2548 struct in6_addr saddr_buf;
2549 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
2550 nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2551 goto nla_put_failure;
2552 }
2553
2554 if (rt->rt6i_prefsrc.plen) {
2555 struct in6_addr saddr_buf;
2556 saddr_buf = rt->rt6i_prefsrc.addr;
2557 if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2558 goto nla_put_failure;
2559 }
2560
2561 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2562 goto nla_put_failure;
2563
2564 if (rt->rt6i_flags & RTF_GATEWAY) {
2565 if (nla_put(skb, RTA_GATEWAY, 16, &rt->rt6i_gateway) < 0)
2566 goto nla_put_failure;
2567 }
2568
2569 if (rt->dst.dev &&
2570 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2571 goto nla_put_failure;
2572 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
2573 goto nla_put_failure;
2574
2575 expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
2576
2577 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
2578 goto nla_put_failure;
2579
2580 return nlmsg_end(skb, nlh);
2581
2582 nla_put_failure:
2583 nlmsg_cancel(skb, nlh);
2584 return -EMSGSIZE;
2585 }
2586
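/* FIB walker callback for route dumps: emit one message per route,
 * honouring the RTM_F_PREFIX flag, which restricts the dump to prefix
 * routes.
 */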
2587 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2588 {
2589 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2590 int prefix;
2591
2592 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2593 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2594 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2595 } else
2596 prefix = 0;
2597
2598 return rt6_fill_node(arg->net,
2599 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2600 NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
2601 prefix, 0, NLM_F_MULTI);
2602 }
2603
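/* RTM_GETROUTE handler: resolve a single route for the requested flow
 * (illustrative userspace trigger: "ip -6 route get 2001:db8::1") and
 * unicast one RTM_NEWROUTE reply back to the requester.
 */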
2604 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
2605 {
2606 struct net *net = sock_net(in_skb->sk);
2607 struct nlattr *tb[RTA_MAX+1];
2608 struct rt6_info *rt;
2609 struct sk_buff *skb;
2610 struct rtmsg *rtm;
2611 struct flowi6 fl6;
2612 int err, iif = 0, oif = 0;
2613
2614 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2615 if (err < 0)
2616 goto errout;
2617
2618 err = -EINVAL;
2619 memset(&fl6, 0, sizeof(fl6));
2620
2621 if (tb[RTA_SRC]) {
2622 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2623 goto errout;
2624
2625 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
2626 }
2627
2628 if (tb[RTA_DST]) {
2629 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2630 goto errout;
2631
2632 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
2633 }
2634
2635 if (tb[RTA_IIF])
2636 iif = nla_get_u32(tb[RTA_IIF]);
2637
2638 if (tb[RTA_OIF])
2639 oif = nla_get_u32(tb[RTA_OIF]);
2640
2641 if (tb[RTA_UID])
2642 fl6.flowi6_uid = make_kuid(current_user_ns(),
2643 nla_get_u32(tb[RTA_UID]));
2644 else
2645 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
2646
2647 if (iif) {
2648 struct net_device *dev;
2649 int flags = 0;
2650
2651 dev = __dev_get_by_index(net, iif);
2652 if (!dev) {
2653 err = -ENODEV;
2654 goto errout;
2655 }
2656
2657 fl6.flowi6_iif = iif;
2658
2659 if (!ipv6_addr_any(&fl6.saddr))
2660 flags |= RT6_LOOKUP_F_HAS_SADDR;
2661
2662 rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
2663 flags);
2664 } else {
2665 fl6.flowi6_oif = oif;
2666
2667 rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
2668 }
2669
2670 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2671 if (!skb) {
2672 ip6_rt_put(rt);
2673 err = -ENOBUFS;
2674 goto errout;
2675 }
2676
2677 /* Reserve room for dummy headers; this skb can pass
2678 through a good chunk of the routing engine.
2679 */
2680 skb_reset_mac_header(skb);
2681 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2682
2683 skb_dst_set(skb, &rt->dst);
2684
2685 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
2686 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
2687 nlh->nlmsg_seq, 0, 0, 0);
2688 if (err < 0) {
2689 kfree_skb(skb);
2690 goto errout;
2691 }
2692
2693 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2694 errout:
2695 return err;
2696 }
2697
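/* Notify RTNLGRP_IPV6_ROUTE listeners about a route change (@event is
 * RTM_NEWROUTE or RTM_DELROUTE).
 */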
2698 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2699 {
2700 struct sk_buff *skb;
2701 struct net *net = info->nl_net;
2702 u32 seq;
2703 int err;
2704
2705 err = -ENOBUFS;
2706 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
2707
2708 skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2709 if (!skb)
2710 goto errout;
2711
2712 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
2713 event, info->portid, seq, 0, 0, 0);
2714 if (err < 0) {
2715 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2716 WARN_ON(err == -EMSGSIZE);
2717 kfree_skb(skb);
2718 goto errout;
2719 }
2720 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
2721 info->nlh, gfp_any());
2722 return;
2723 errout:
2724 if (err < 0)
2725 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2726 }
2727
2728 static int ip6_route_dev_notify(struct notifier_block *this,
2729 unsigned long event, void *data)
2730 {
2731 struct net_device *dev = (struct net_device *)data;
2732 struct net *net = dev_net(dev);
2733
2734 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2735 net->ipv6.ip6_null_entry->dst.dev = dev;
2736 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2737 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2738 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
2739 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2740 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
2741 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2742 #endif
2743 }
2744
2745 return NOTIFY_OK;
2746 }
2747
2748 /*
2749 * /proc
2750 */
2751
2752 #ifdef CONFIG_PROC_FS
2753
2754 struct rt6_proc_arg
2755 {
2756 char *buffer;
2757 int offset;
2758 int length;
2759 int skip;
2760 int len;
2761 };
2762
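/* Emit one line of /proc/net/ipv6_route per route: destination and source
 * prefixes, gateway, metric, refcount, use count, flags (all hex) and the
 * output device name.
 */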
2763 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2764 {
2765 struct seq_file *m = p_arg;
2766
2767 seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
2768
2769 #ifdef CONFIG_IPV6_SUBTREES
2770 seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
2771 #else
2772 seq_puts(m, "00000000000000000000000000000000 00 ");
2773 #endif
2774 if (rt->rt6i_flags & RTF_GATEWAY) {
2775 seq_printf(m, "%pi6", &rt->rt6i_gateway);
2776 } else {
2777 seq_puts(m, "00000000000000000000000000000000");
2778 }
2779 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2780 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
2781 rt->dst.__use, rt->rt6i_flags,
2782 rt->dst.dev ? rt->dst.dev->name : "");
2783 return 0;
2784 }
2785
2786 static int ipv6_route_show(struct seq_file *m, void *v)
2787 {
2788 struct net *net = (struct net *)m->private;
2789 fib6_clean_all_ro(net, rt6_info_route, 0, m);
2790 return 0;
2791 }
2792
2793 static int ipv6_route_open(struct inode *inode, struct file *file)
2794 {
2795 return single_open_net(inode, file, ipv6_route_show);
2796 }
2797
2798 static const struct file_operations ipv6_route_proc_fops = {
2799 .owner = THIS_MODULE,
2800 .open = ipv6_route_open,
2801 .read = seq_read,
2802 .llseek = seq_lseek,
2803 .release = single_release_net,
2804 };
2805
2806 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2807 {
2808 struct net *net = (struct net *)seq->private;
2809 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2810 net->ipv6.rt6_stats->fib_nodes,
2811 net->ipv6.rt6_stats->fib_route_nodes,
2812 net->ipv6.rt6_stats->fib_rt_alloc,
2813 net->ipv6.rt6_stats->fib_rt_entries,
2814 net->ipv6.rt6_stats->fib_rt_cache,
2815 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
2816 net->ipv6.rt6_stats->fib_discarded_routes);
2817
2818 return 0;
2819 }
2820
2821 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2822 {
2823 return single_open_net(inode, file, rt6_stats_seq_show);
2824 }
2825
2826 static const struct file_operations rt6_stats_seq_fops = {
2827 .owner = THIS_MODULE,
2828 .open = rt6_stats_seq_open,
2829 .read = seq_read,
2830 .llseek = seq_lseek,
2831 .release = single_release_net,
2832 };
2833 #endif /* CONFIG_PROC_FS */
2834
2835 #ifdef CONFIG_SYSCTL
2836
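/* Handler for the "flush" sysctl: writing to it (for illustration,
 * "echo 1 > /proc/sys/net/ipv6/route/flush") forces a garbage-collection
 * pass over cached routes via fib6_run_gc().
 */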
2837 static
2838 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
2839 void __user *buffer, size_t *lenp, loff_t *ppos)
2840 {
2841 struct net *net;
2842 int delay;
2843 if (!write)
2844 return -EINVAL;
2845
2846 net = (struct net *)ctl->extra1;
2847 delay = net->ipv6.sysctl.flush_delay;
2848 proc_dointvec(ctl, write, buffer, lenp, ppos);
2849 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2850 return 0;
2851 }
2852
2853 ctl_table ipv6_route_table_template[] = {
2854 {
2855 .procname = "flush",
2856 .data = &init_net.ipv6.sysctl.flush_delay,
2857 .maxlen = sizeof(int),
2858 .mode = 0200,
2859 .proc_handler = ipv6_sysctl_rtcache_flush
2860 },
2861 {
2862 .procname = "gc_thresh",
2863 .data = &ip6_dst_ops_template.gc_thresh,
2864 .maxlen = sizeof(int),
2865 .mode = 0644,
2866 .proc_handler = proc_dointvec,
2867 },
2868 {
2869 .procname = "max_size",
2870 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
2871 .maxlen = sizeof(int),
2872 .mode = 0644,
2873 .proc_handler = proc_dointvec,
2874 },
2875 {
2876 .procname = "gc_min_interval",
2877 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2878 .maxlen = sizeof(int),
2879 .mode = 0644,
2880 .proc_handler = proc_dointvec_jiffies,
2881 },
2882 {
2883 .procname = "gc_timeout",
2884 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
2885 .maxlen = sizeof(int),
2886 .mode = 0644,
2887 .proc_handler = proc_dointvec_jiffies,
2888 },
2889 {
2890 .procname = "gc_interval",
2891 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
2892 .maxlen = sizeof(int),
2893 .mode = 0644,
2894 .proc_handler = proc_dointvec_jiffies,
2895 },
2896 {
2897 .procname = "gc_elasticity",
2898 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
2899 .maxlen = sizeof(int),
2900 .mode = 0644,
2901 .proc_handler = proc_dointvec,
2902 },
2903 {
2904 .procname = "mtu_expires",
2905 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
2906 .maxlen = sizeof(int),
2907 .mode = 0644,
2908 .proc_handler = proc_dointvec_jiffies,
2909 },
2910 {
2911 .procname = "min_adv_mss",
2912 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
2913 .maxlen = sizeof(int),
2914 .mode = 0644,
2915 .proc_handler = proc_dointvec,
2916 },
2917 {
2918 .procname = "gc_min_interval_ms",
2919 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2920 .maxlen = sizeof(int),
2921 .mode = 0644,
2922 .proc_handler = proc_dointvec_ms_jiffies,
2923 },
2924 { }
2925 };
2926
2927 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2928 {
2929 struct ctl_table *table;
2930
2931 table = kmemdup(ipv6_route_table_template,
2932 sizeof(ipv6_route_table_template),
2933 GFP_KERNEL);
2934
2935 if (table) {
2936 table[0].data = &net->ipv6.sysctl.flush_delay;
2937 table[0].extra1 = net;
2938 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2939 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2940 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2941 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2942 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2943 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2944 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2945 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2946 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2947
2948 /* Don't export sysctls to unprivileged users */
2949 if (net->user_ns != &init_user_ns)
2950 table[0].procname = NULL;
2951 }
2952
2953 return table;
2954 }
2955 #endif
2956
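/* Per-namespace setup: clone the dst_ops template, allocate the null
 * (and, with multiple tables, prohibit/blackhole) template routes and
 * initialise the per-netns sysctl defaults.
 */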
2957 static int __net_init ip6_route_net_init(struct net *net)
2958 {
2959 int ret = -ENOMEM;
2960
2961 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
2962 sizeof(net->ipv6.ip6_dst_ops));
2963
2964 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
2965 goto out_ip6_dst_ops;
2966
2967 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
2968 sizeof(*net->ipv6.ip6_null_entry),
2969 GFP_KERNEL);
2970 if (!net->ipv6.ip6_null_entry)
2971 goto out_ip6_dst_entries;
2972 net->ipv6.ip6_null_entry->dst.path =
2973 (struct dst_entry *)net->ipv6.ip6_null_entry;
2974 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2975 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
2976 ip6_template_metrics, true);
2977
2978 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2979 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
2980 sizeof(*net->ipv6.ip6_prohibit_entry),
2981 GFP_KERNEL);
2982 if (!net->ipv6.ip6_prohibit_entry)
2983 goto out_ip6_null_entry;
2984 net->ipv6.ip6_prohibit_entry->dst.path =
2985 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
2986 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2987 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
2988 ip6_template_metrics, true);
2989
2990 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
2991 sizeof(*net->ipv6.ip6_blk_hole_entry),
2992 GFP_KERNEL);
2993 if (!net->ipv6.ip6_blk_hole_entry)
2994 goto out_ip6_prohibit_entry;
2995 net->ipv6.ip6_blk_hole_entry->dst.path =
2996 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
2997 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2998 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
2999 ip6_template_metrics, true);
3000 #endif
3001
3002 net->ipv6.sysctl.flush_delay = 0;
3003 net->ipv6.sysctl.ip6_rt_max_size = 4096;
3004 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
3005 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
3006 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
3007 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
3008 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
3009 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
3010
3011 net->ipv6.ip6_rt_gc_expire = 30*HZ;
3012
3013 ret = 0;
3014 out:
3015 return ret;
3016
3017 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3018 out_ip6_prohibit_entry:
3019 kfree(net->ipv6.ip6_prohibit_entry);
3020 out_ip6_null_entry:
3021 kfree(net->ipv6.ip6_null_entry);
3022 #endif
3023 out_ip6_dst_entries:
3024 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3025 out_ip6_dst_ops:
3026 goto out;
3027 }
3028
3029 static void __net_exit ip6_route_net_exit(struct net *net)
3030 {
3031 kfree(net->ipv6.ip6_null_entry);
3032 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3033 kfree(net->ipv6.ip6_prohibit_entry);
3034 kfree(net->ipv6.ip6_blk_hole_entry);
3035 #endif
3036 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3037 }
3038
3039 static int __net_init ip6_route_net_init_late(struct net *net)
3040 {
3041 #ifdef CONFIG_PROC_FS
3042 proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
3043 proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
3044 #endif
3045 return 0;
3046 }
3047
3048 static void __net_exit ip6_route_net_exit_late(struct net *net)
3049 {
3050 #ifdef CONFIG_PROC_FS
3051 remove_proc_entry("ipv6_route", net->proc_net);
3052 remove_proc_entry("rt6_stats", net->proc_net);
3053 #endif
3054 }
3055
3056 static struct pernet_operations ip6_route_net_ops = {
3057 .init = ip6_route_net_init,
3058 .exit = ip6_route_net_exit,
3059 };
3060
3061 static int __net_init ipv6_inetpeer_init(struct net *net)
3062 {
3063 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3064
3065 if (!bp)
3066 return -ENOMEM;
3067 inet_peer_base_init(bp);
3068 net->ipv6.peers = bp;
3069 return 0;
3070 }
3071
3072 static void __net_exit ipv6_inetpeer_exit(struct net *net)
3073 {
3074 struct inet_peer_base *bp = net->ipv6.peers;
3075
3076 net->ipv6.peers = NULL;
3077 inetpeer_invalidate_tree(bp);
3078 kfree(bp);
3079 }
3080
3081 static struct pernet_operations ipv6_inetpeer_ops = {
3082 .init = ipv6_inetpeer_init,
3083 .exit = ipv6_inetpeer_exit,
3084 };
3085
3086 static struct pernet_operations ip6_route_net_late_ops = {
3087 .init = ip6_route_net_init_late,
3088 .exit = ip6_route_net_exit_late,
3089 };
3090
3091 static struct notifier_block ip6_route_dev_notifier = {
3092 .notifier_call = ip6_route_dev_notify,
3093 .priority = 0,
3094 };
3095
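/* Subsystem init: create the rt6_info slab cache, register the pernet
 * operations, FIB/xfrm/rules hooks, rtnetlink handlers and the netdevice
 * notifier; each error label below unwinds the steps taken so far in
 * reverse order.
 */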
3096 int __init ip6_route_init(void)
3097 {
3098 int ret;
3099
3100 ret = -ENOMEM;
3101 ip6_dst_ops_template.kmem_cachep =
3102 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
3103 SLAB_HWCACHE_ALIGN, NULL);
3104 if (!ip6_dst_ops_template.kmem_cachep)
3105 goto out;
3106
3107 ret = dst_entries_init(&ip6_dst_blackhole_ops);
3108 if (ret)
3109 goto out_kmem_cache;
3110
3111 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
3112 if (ret)
3113 goto out_dst_entries;
3114
3115 ret = register_pernet_subsys(&ip6_route_net_ops);
3116 if (ret)
3117 goto out_register_inetpeer;
3118
3119 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
3120
3121 /* Registration of the loopback device is done before this portion of
3122 * code, so the loopback reference in rt6_info will not be taken; do it
3123 * manually for init_net */
3124 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
3125 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3126 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3127 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
3128 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3129 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
3130 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3131 #endif
3132 ret = fib6_init();
3133 if (ret)
3134 goto out_register_subsys;
3135
3136 ret = xfrm6_init();
3137 if (ret)
3138 goto out_fib6_init;
3139
3140 ret = fib6_rules_init();
3141 if (ret)
3142 goto xfrm6_init;
3143
3144 ret = register_pernet_subsys(&ip6_route_net_late_ops);
3145 if (ret)
3146 goto fib6_rules_init;
3147
3148 ret = -ENOBUFS;
3149 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
3150 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
3151 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
3152 goto out_register_late_subsys;
3153
3154 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
3155 if (ret)
3156 goto out_register_late_subsys;
3157
3158 out:
3159 return ret;
3160
3161 out_register_late_subsys:
3162 unregister_pernet_subsys(&ip6_route_net_late_ops);
3163 fib6_rules_init:
3164 fib6_rules_cleanup();
3165 xfrm6_init:
3166 xfrm6_fini();
3167 out_fib6_init:
3168 fib6_gc_cleanup();
3169 out_register_subsys:
3170 unregister_pernet_subsys(&ip6_route_net_ops);
3171 out_register_inetpeer:
3172 unregister_pernet_subsys(&ipv6_inetpeer_ops);
3173 out_dst_entries:
3174 dst_entries_destroy(&ip6_dst_blackhole_ops);
3175 out_kmem_cache:
3176 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3177 goto out;
3178 }
3179
3180 void ip6_route_cleanup(void)
3181 {
3182 unregister_netdevice_notifier(&ip6_route_dev_notifier);
3183 unregister_pernet_subsys(&ip6_route_net_late_ops);
3184 fib6_rules_cleanup();
3185 xfrm6_fini();
3186 fib6_gc_cleanup();
3187 unregister_pernet_subsys(&ipv6_inetpeer_ops);
3188 unregister_pernet_subsys(&ip6_route_net_ops);
3189 dst_entries_destroy(&ip6_dst_blackhole_ops);
3190 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3191 }