net/ipv6/route.c (LineageOS/android_kernel_samsung_universal7580.git, commit 2c79063b665383c98a5132f09bddd05b7fd9f1bc)
1 /*
2 * Linux INET6 implementation
3 * FIB front-end.
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14 /* Changes:
15 *
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable. otherwise, round-robin the list.
23 * Ville Nuorvala
24 * Fixed routing subtrees.
25 */
26
27 #define pr_fmt(fmt) "IPv6: " fmt
28
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/types.h>
33 #include <linux/times.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/net.h>
37 #include <linux/route.h>
38 #include <linux/netdevice.h>
39 #include <linux/in6.h>
40 #include <linux/mroute6.h>
41 #include <linux/init.h>
42 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/nsproxy.h>
46 #include <linux/slab.h>
47 #include <net/net_namespace.h>
48 #include <net/snmp.h>
49 #include <net/ipv6.h>
50 #include <net/ip6_fib.h>
51 #include <net/ip6_route.h>
52 #include <net/ndisc.h>
53 #include <net/addrconf.h>
54 #include <net/tcp.h>
55 #include <linux/rtnetlink.h>
56 #include <net/dst.h>
57 #include <net/xfrm.h>
58 #include <net/netevent.h>
59 #include <net/netlink.h>
60 #include <net/nexthop.h>
61
62 #include <asm/uaccess.h>
63
64 #ifdef CONFIG_SYSCTL
65 #include <linux/sysctl.h>
66 #endif
67
68 enum rt6_nud_state {
69 RT6_NUD_FAIL_HARD = -2,
70 RT6_NUD_FAIL_SOFT = -1,
71 RT6_NUD_SUCCEED = 1
72 };
73
74 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
75 const struct in6_addr *dest);
76 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
77 static unsigned int ip6_default_advmss(const struct dst_entry *dst);
78 static unsigned int ip6_mtu(const struct dst_entry *dst);
79 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
80 static void ip6_dst_destroy(struct dst_entry *);
81 static void ip6_dst_ifdown(struct dst_entry *,
82 struct net_device *dev, int how);
83 static int ip6_dst_gc(struct dst_ops *ops);
84
85 static int ip6_pkt_discard(struct sk_buff *skb);
86 static int ip6_pkt_discard_out(struct sk_buff *skb);
87 static int ip6_pkt_prohibit(struct sk_buff *skb);
88 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
89 static void ip6_link_failure(struct sk_buff *skb);
90 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
91 struct sk_buff *skb, u32 mtu);
92 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
93 struct sk_buff *skb);
94
95 #ifdef CONFIG_IPV6_ROUTE_INFO
96 static struct rt6_info *rt6_add_route_info(struct net_device *dev,
97 const struct in6_addr *prefix, int prefixlen,
98 const struct in6_addr *gwaddr, unsigned int pref);
99 static struct rt6_info *rt6_get_route_info(struct net_device *dev,
100 const struct in6_addr *prefix, int prefixlen,
101 const struct in6_addr *gwaddr);
102 #endif
103
104 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
105 {
106 struct rt6_info *rt = (struct rt6_info *) dst;
107 struct inet_peer *peer;
108 u32 *p = NULL;
109
110 if (!(rt->dst.flags & DST_HOST))
111 return dst_cow_metrics_generic(dst, old);
112
113 peer = rt6_get_peer_create(rt);
114 if (peer) {
115 u32 *old_p = __DST_METRICS_PTR(old);
116 unsigned long prev, new;
117
118 p = peer->metrics;
119 if (inet_metrics_new(peer))
120 memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
121
122 new = (unsigned long) p;
123 prev = cmpxchg(&dst->_metrics, old, new);
124
125 if (prev != old) {
126 p = __DST_METRICS_PTR(prev);
127 if (prev & DST_METRICS_READ_ONLY)
128 p = NULL;
129 }
130 }
131 return p;
132 }
133
134 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
135 struct sk_buff *skb,
136 const void *daddr)
137 {
138 struct in6_addr *p = &rt->rt6i_gateway;
139
140 if (!ipv6_addr_any(p))
141 return (const void *) p;
142 else if (skb)
143 return &ipv6_hdr(skb)->daddr;
144 return daddr;
145 }
146
147 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
148 struct sk_buff *skb,
149 const void *daddr)
150 {
151 struct rt6_info *rt = (struct rt6_info *) dst;
152 struct neighbour *n;
153
154 daddr = choose_neigh_daddr(rt, skb, daddr);
155 n = __ipv6_neigh_lookup(dst->dev, daddr);
156 if (n)
157 return n;
158 return neigh_create(&nd_tbl, daddr, dst->dev);
159 }
160
161 static struct dst_ops ip6_dst_ops_template = {
162 .family = AF_INET6,
163 .protocol = cpu_to_be16(ETH_P_IPV6),
164 .gc = ip6_dst_gc,
165 .gc_thresh = 1024,
166 .check = ip6_dst_check,
167 .default_advmss = ip6_default_advmss,
168 .mtu = ip6_mtu,
169 .cow_metrics = ipv6_cow_metrics,
170 .destroy = ip6_dst_destroy,
171 .ifdown = ip6_dst_ifdown,
172 .negative_advice = ip6_negative_advice,
173 .link_failure = ip6_link_failure,
174 .update_pmtu = ip6_rt_update_pmtu,
175 .redirect = rt6_do_redirect,
176 .local_out = __ip6_local_out,
177 .neigh_lookup = ip6_neigh_lookup,
178 };
179
180 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
181 {
182 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
183
184 return mtu ? : dst->dev->mtu;
185 }
186
187 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
188 struct sk_buff *skb, u32 mtu)
189 {
190 }
191
192 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
193 struct sk_buff *skb)
194 {
195 }
196
197 static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
198 unsigned long old)
199 {
200 return NULL;
201 }
202
203 static struct dst_ops ip6_dst_blackhole_ops = {
204 .family = AF_INET6,
205 .protocol = cpu_to_be16(ETH_P_IPV6),
206 .destroy = ip6_dst_destroy,
207 .check = ip6_dst_check,
208 .mtu = ip6_blackhole_mtu,
209 .default_advmss = ip6_default_advmss,
210 .update_pmtu = ip6_rt_blackhole_update_pmtu,
211 .redirect = ip6_rt_blackhole_redirect,
212 .cow_metrics = ip6_rt_blackhole_cow_metrics,
213 .neigh_lookup = ip6_neigh_lookup,
214 };
215
216 static const u32 ip6_template_metrics[RTAX_MAX] = {
217 [RTAX_HOPLIMIT - 1] = 0,
218 };
219
220 static const struct rt6_info ip6_null_entry_template = {
221 .dst = {
222 .__refcnt = ATOMIC_INIT(1),
223 .__use = 1,
224 .obsolete = DST_OBSOLETE_FORCE_CHK,
225 .error = -ENETUNREACH,
226 .input = ip6_pkt_discard,
227 .output = ip6_pkt_discard_out,
228 },
229 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
230 .rt6i_protocol = RTPROT_KERNEL,
231 .rt6i_metric = ~(u32) 0,
232 .rt6i_ref = ATOMIC_INIT(1),
233 };
234
235 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
236
237 static const struct rt6_info ip6_prohibit_entry_template = {
238 .dst = {
239 .__refcnt = ATOMIC_INIT(1),
240 .__use = 1,
241 .obsolete = DST_OBSOLETE_FORCE_CHK,
242 .error = -EACCES,
243 .input = ip6_pkt_prohibit,
244 .output = ip6_pkt_prohibit_out,
245 },
246 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
247 .rt6i_protocol = RTPROT_KERNEL,
248 .rt6i_metric = ~(u32) 0,
249 .rt6i_ref = ATOMIC_INIT(1),
250 };
251
252 static const struct rt6_info ip6_blk_hole_entry_template = {
253 .dst = {
254 .__refcnt = ATOMIC_INIT(1),
255 .__use = 1,
256 .obsolete = DST_OBSOLETE_FORCE_CHK,
257 .error = -EINVAL,
258 .input = dst_discard,
259 .output = dst_discard,
260 },
261 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
262 .rt6i_protocol = RTPROT_KERNEL,
263 .rt6i_metric = ~(u32) 0,
264 .rt6i_ref = ATOMIC_INIT(1),
265 };
266
267 #endif
268
269 /* allocate dst with ip6_dst_ops */
270 static inline struct rt6_info *ip6_dst_alloc(struct net *net,
271 struct net_device *dev,
272 int flags,
273 struct fib6_table *table)
274 {
275 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
276 0, DST_OBSOLETE_FORCE_CHK, flags);
277
278 if (rt) {
279 struct dst_entry *dst = &rt->dst;
280
281 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
282 rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
283 rt->rt6i_genid = rt_genid(net);
284 INIT_LIST_HEAD(&rt->rt6i_siblings);
285 rt->rt6i_nsiblings = 0;
286 }
287 return rt;
288 }
289
290 static void ip6_dst_destroy(struct dst_entry *dst)
291 {
292 struct rt6_info *rt = (struct rt6_info *)dst;
293 struct inet6_dev *idev = rt->rt6i_idev;
294 struct dst_entry *from = dst->from;
295
296 if (!(rt->dst.flags & DST_HOST))
297 dst_destroy_metrics_generic(dst);
298
299 if (idev) {
300 rt->rt6i_idev = NULL;
301 in6_dev_put(idev);
302 }
303
304 dst->from = NULL;
305 dst_release(from);
306
307 if (rt6_has_peer(rt)) {
308 struct inet_peer *peer = rt6_peer_ptr(rt);
309 inet_putpeer(peer);
310 }
311 }
312
313 void rt6_bind_peer(struct rt6_info *rt, int create)
314 {
315 struct inet_peer_base *base;
316 struct inet_peer *peer;
317
318 base = inetpeer_base_ptr(rt->_rt6i_peer);
319 if (!base)
320 return;
321
322 peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
323 if (peer) {
324 if (!rt6_set_peer(rt, peer))
325 inet_putpeer(peer);
326 }
327 }
328
329 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
330 int how)
331 {
332 struct rt6_info *rt = (struct rt6_info *)dst;
333 struct inet6_dev *idev = rt->rt6i_idev;
334 struct net_device *loopback_dev =
335 dev_net(dev)->loopback_dev;
336
337 if (dev != loopback_dev) {
338 if (idev && idev->dev == dev) {
339 struct inet6_dev *loopback_idev =
340 in6_dev_get(loopback_dev);
341 if (loopback_idev) {
342 rt->rt6i_idev = loopback_idev;
343 in6_dev_put(idev);
344 }
345 }
346 }
347 }
348
349 static bool rt6_check_expired(const struct rt6_info *rt)
350 {
351 if (rt->rt6i_flags & RTF_EXPIRES) {
352 if (time_after(jiffies, rt->dst.expires))
353 return true;
354 } else if (rt->dst.from) {
355 return rt6_check_expired((struct rt6_info *) rt->dst.from);
356 }
357 return false;
358 }
359
360 static bool rt6_need_strict(const struct in6_addr *daddr)
361 {
362 return ipv6_addr_type(daddr) &
363 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
364 }
365
366 /* Multipath route selection:
367 * Hash based function using packet header and flowlabel.
368 * Adapted from fib_info_hashfn()
369 */
370 static int rt6_info_hash_nhsfn(unsigned int candidate_count,
371 const struct flowi6 *fl6)
372 {
373 unsigned int val = fl6->flowi6_proto;
374
375 val ^= ipv6_addr_hash(&fl6->daddr);
376 val ^= ipv6_addr_hash(&fl6->saddr);
377
378 /* Works only if this is not encapsulated */
379 switch (fl6->flowi6_proto) {
380 case IPPROTO_UDP:
381 case IPPROTO_TCP:
382 case IPPROTO_SCTP:
383 val ^= (__force u16)fl6->fl6_sport;
384 val ^= (__force u16)fl6->fl6_dport;
385 break;
386
387 case IPPROTO_ICMPV6:
388 val ^= (__force u16)fl6->fl6_icmp_type;
389 val ^= (__force u16)fl6->fl6_icmp_code;
390 break;
391 }
392 /* RFC 6438 recommends using the flow label */
393 val ^= (__force u32)fl6->flowlabel;
394
395 /* Perhaps we need to tune this function? */
396 val = val ^ (val >> 7) ^ (val >> 12);
397 return val % candidate_count;
398 }
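/*
 * Illustrative note (not part of the original file): a minimal worked
 * sketch of how the hash above selects among equal-cost siblings,
 * assuming three candidate routes (candidate_count == 3) and a TCP flow.
 *
 *	val  = IPPROTO_TCP;
 *	val ^= ipv6_addr_hash(&fl6->daddr) ^ ipv6_addr_hash(&fl6->saddr);
 *	val ^= (__force u16)fl6->fl6_sport ^ (__force u16)fl6->fl6_dport;
 *	val ^= (__force u32)fl6->flowlabel;
 *	val  = val ^ (val >> 7) ^ (val >> 12);
 *	return val % 3;		// 0 keeps the matched route, 1 or 2 picks a sibling
 */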
399
400 static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
401 struct flowi6 *fl6)
402 {
403 struct rt6_info *sibling, *next_sibling;
404 int route_choosen;
405
406 route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
407 /* Don't change the route if route_choosen == 0
408 * (the sibling list does not include this route itself)
409 */
410 if (route_choosen)
411 list_for_each_entry_safe(sibling, next_sibling,
412 &match->rt6i_siblings, rt6i_siblings) {
413 route_choosen--;
414 if (route_choosen == 0) {
415 match = sibling;
416 break;
417 }
418 }
419 return match;
420 }
421
422 /*
423 * Route lookup. Any table->tb6_lock is implied.
424 */
425
426 static inline struct rt6_info *rt6_device_match(struct net *net,
427 struct rt6_info *rt,
428 const struct in6_addr *saddr,
429 int oif,
430 int flags)
431 {
432 struct rt6_info *local = NULL;
433 struct rt6_info *sprt;
434
435 if (!oif && ipv6_addr_any(saddr))
436 goto out;
437
438 for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
439 struct net_device *dev = sprt->dst.dev;
440
441 if (oif) {
442 if (dev->ifindex == oif)
443 return sprt;
444 if (dev->flags & IFF_LOOPBACK) {
445 if (!sprt->rt6i_idev ||
446 sprt->rt6i_idev->dev->ifindex != oif) {
447 if (flags & RT6_LOOKUP_F_IFACE && oif)
448 continue;
449 if (local && (!oif ||
450 local->rt6i_idev->dev->ifindex == oif))
451 continue;
452 }
453 local = sprt;
454 }
455 } else {
456 if (ipv6_chk_addr(net, saddr, dev,
457 flags & RT6_LOOKUP_F_IFACE))
458 return sprt;
459 }
460 }
461
462 if (oif) {
463 if (local)
464 return local;
465
466 if (flags & RT6_LOOKUP_F_IFACE)
467 return net->ipv6.ip6_null_entry;
468 }
469 out:
470 return rt;
471 }
472
473 #ifdef CONFIG_IPV6_ROUTER_PREF
474 struct __rt6_probe_work {
475 struct work_struct work;
476 struct in6_addr target;
477 struct net_device *dev;
478 };
479
480 static void rt6_probe_deferred(struct work_struct *w)
481 {
482 struct in6_addr mcaddr;
483 struct __rt6_probe_work *work =
484 container_of(w, struct __rt6_probe_work, work);
485
486 addrconf_addr_solict_mult(&work->target, &mcaddr);
487 ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
488 dev_put(work->dev);
489 kfree(w);
490 }
491
492 static void rt6_probe(struct rt6_info *rt)
493 {
494 struct neighbour *neigh;
495 /*
496 * Okay, this does not seem to be appropriate
497 * for now, however, we need to check if it
498 * is really so; aka Router Reachability Probing.
499 *
500 * Router Reachability Probe MUST be rate-limited
501 * to no more than one per minute.
502 */
503 if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
504 return;
505 rcu_read_lock_bh();
506 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
507 if (neigh) {
508 write_lock(&neigh->lock);
509 if (neigh->nud_state & NUD_VALID)
510 goto out;
511 }
512
513 if (!neigh ||
514 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
515 struct __rt6_probe_work *work;
516
517 work = kmalloc(sizeof(*work), GFP_ATOMIC);
518
519 if (neigh && work)
520 neigh->updated = jiffies;
521
522 if (neigh)
523 write_unlock(&neigh->lock);
524
525 if (work) {
526 INIT_WORK(&work->work, rt6_probe_deferred);
527 work->target = rt->rt6i_gateway;
528 dev_hold(rt->dst.dev);
529 work->dev = rt->dst.dev;
530 schedule_work(&work->work);
531 }
532 } else {
533 out:
534 write_unlock(&neigh->lock);
535 }
536 rcu_read_unlock_bh();
537 }
538 #else
539 static inline void rt6_probe(struct rt6_info *rt)
540 {
541 }
542 #endif
543
544 /*
545 * Default Router Selection (RFC 2461 6.3.6)
546 */
547 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
548 {
549 struct net_device *dev = rt->dst.dev;
550 if (!oif || dev->ifindex == oif)
551 return 2;
552 if ((dev->flags & IFF_LOOPBACK) &&
553 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
554 return 1;
555 return 0;
556 }
557
558 static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
559 {
560 struct neighbour *neigh;
561 enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
562
563 if (rt->rt6i_flags & RTF_NONEXTHOP ||
564 !(rt->rt6i_flags & RTF_GATEWAY))
565 return RT6_NUD_SUCCEED;
566
567 rcu_read_lock_bh();
568 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
569 if (neigh) {
570 read_lock(&neigh->lock);
571 if (neigh->nud_state & NUD_VALID)
572 ret = RT6_NUD_SUCCEED;
573 #ifdef CONFIG_IPV6_ROUTER_PREF
574 else if (!(neigh->nud_state & NUD_FAILED))
575 ret = RT6_NUD_SUCCEED;
576 #endif
577 read_unlock(&neigh->lock);
578 } else {
579 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
580 RT6_NUD_SUCCEED : RT6_NUD_FAIL_SOFT;
581 }
582 rcu_read_unlock_bh();
583
584 return ret;
585 }
586
587 static int rt6_score_route(struct rt6_info *rt, int oif,
588 int strict)
589 {
590 int m;
591
592 m = rt6_check_dev(rt, oif);
593 if (!m && (strict & RT6_LOOKUP_F_IFACE))
594 return RT6_NUD_FAIL_HARD;
595 #ifdef CONFIG_IPV6_ROUTER_PREF
596 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
597 #endif
598 if (strict & RT6_LOOKUP_F_REACHABLE) {
599 int n = rt6_check_neigh(rt);
600 if (n < 0)
601 return n;
602 }
603 return m;
604 }
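/*
 * Illustrative note (not part of the original file): with the RFC 4191
 * encoding used here, IPV6_DECODE_PREF() yields 1 (low), 2 (medium) or
 * 3 (high).  So a route whose device matches oif (m = 2) and that was
 * advertised with high preference scores m = 2 | (3 << 2) = 14, while a
 * medium-preference route that only matches via its loopback idev (m = 1)
 * scores 1 | (2 << 2) = 9.  find_match() below keeps the highest score.
 */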
605
606 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
607 int *mpri, struct rt6_info *match,
608 bool *do_rr)
609 {
610 int m;
611 bool match_do_rr = false;
612
613 if (rt6_check_expired(rt))
614 goto out;
615
616 m = rt6_score_route(rt, oif, strict);
617 if (m == RT6_NUD_FAIL_SOFT && !IS_ENABLED(CONFIG_IPV6_ROUTER_PREF)) {
618 match_do_rr = true;
619 m = 0; /* lowest valid score */
620 } else if (m < 0) {
621 goto out;
622 }
623
624 if (strict & RT6_LOOKUP_F_REACHABLE)
625 rt6_probe(rt);
626
627 if (m > *mpri) {
628 *do_rr = match_do_rr;
629 *mpri = m;
630 match = rt;
631 }
632 out:
633 return match;
634 }
635
636 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
637 struct rt6_info *rr_head,
638 u32 metric, int oif, int strict,
639 bool *do_rr)
640 {
641 struct rt6_info *rt, *match;
642 int mpri = -1;
643
644 match = NULL;
645 for (rt = rr_head; rt && rt->rt6i_metric == metric;
646 rt = rt->dst.rt6_next)
647 match = find_match(rt, oif, strict, &mpri, match, do_rr);
648 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
649 rt = rt->dst.rt6_next)
650 match = find_match(rt, oif, strict, &mpri, match, do_rr);
651
652 return match;
653 }
654
655 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
656 {
657 struct rt6_info *match, *rt0;
658 struct net *net;
659 bool do_rr = false;
660
661 rt0 = fn->rr_ptr;
662 if (!rt0)
663 fn->rr_ptr = rt0 = fn->leaf;
664
665 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
666 &do_rr);
667
668 if (do_rr) {
669 struct rt6_info *next = rt0->dst.rt6_next;
670
671 /* no entries matched; do round-robin */
672 if (!next || next->rt6i_metric != rt0->rt6i_metric)
673 next = fn->leaf;
674
675 if (next != rt0)
676 fn->rr_ptr = next;
677 }
678
679 net = dev_net(rt0->dst.dev);
680 return match ? match : net->ipv6.ip6_null_entry;
681 }
682
683 #ifdef CONFIG_IPV6_ROUTE_INFO
684 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
685 const struct in6_addr *gwaddr)
686 {
687 struct route_info *rinfo = (struct route_info *) opt;
688 struct in6_addr prefix_buf, *prefix;
689 unsigned int pref;
690 unsigned long lifetime;
691 struct rt6_info *rt;
692
693 if (len < sizeof(struct route_info)) {
694 return -EINVAL;
695 }
696
697 /* Sanity check for prefix_len and length */
698 if (rinfo->length > 3) {
699 return -EINVAL;
700 } else if (rinfo->prefix_len > 128) {
701 return -EINVAL;
702 } else if (rinfo->prefix_len > 64) {
703 if (rinfo->length < 2) {
704 return -EINVAL;
705 }
706 } else if (rinfo->prefix_len > 0) {
707 if (rinfo->length < 1) {
708 return -EINVAL;
709 }
710 }
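/*
 * Illustrative note (not part of the original file): per RFC 4191 the
 * option length is counted in units of 8 octets, so length 1 carries no
 * prefix octets, length 2 carries 8 (enough for prefixes up to /64) and
 * length 3 carries the full 16.  The checks above reject options longer
 * than 3 units, prefix lengths above 128, and options too short to
 * plausibly hold the advertised prefix length.
 */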
711
712 pref = rinfo->route_pref;
713 if (pref == ICMPV6_ROUTER_PREF_INVALID)
714 return -EINVAL;
715
716 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
717
718 if (rinfo->length == 3)
719 prefix = (struct in6_addr *)rinfo->prefix;
720 else {
721 /* this function is safe */
722 ipv6_addr_prefix(&prefix_buf,
723 (struct in6_addr *)rinfo->prefix,
724 rinfo->prefix_len);
725 prefix = &prefix_buf;
726 }
727
728 rt = rt6_get_route_info(dev, prefix, rinfo->prefix_len, gwaddr);
729
730 if (rt && !lifetime) {
731 ip6_del_rt(rt);
732 rt = NULL;
733 }
734
735 if (!rt && lifetime)
736 rt = rt6_add_route_info(dev, prefix, rinfo->prefix_len, gwaddr, pref);
737 else if (rt)
738 rt->rt6i_flags = RTF_ROUTEINFO |
739 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
740
741 if (rt) {
742 if (!addrconf_finite_timeout(lifetime))
743 rt6_clean_expires(rt);
744 else
745 rt6_set_expires(rt, jiffies + HZ * lifetime);
746
747 ip6_rt_put(rt);
748 }
749 return 0;
750 }
751 #endif
752
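/*
 * Descriptive note (not part of the original file): BACKTRACK() is used by
 * the lookup routines below after the selected route turned out to be the
 * null entry.  It walks back up the fib6 tree from 'fn', re-descending into
 * a parent's source-address subtree (FIB6_SUBTREE) where one exists, and
 * jumps to the caller's 'restart' label as soon as a node carrying route
 * information (RTN_RTINFO) is found, or to 'out' once the tree root is hit.
 */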
753 #define BACKTRACK(__net, saddr) \
754 do { \
755 if (rt == __net->ipv6.ip6_null_entry) { \
756 struct fib6_node *pn; \
757 while (1) { \
758 if (fn->fn_flags & RTN_TL_ROOT) \
759 goto out; \
760 pn = fn->parent; \
761 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
762 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
763 else \
764 fn = pn; \
765 if (fn->fn_flags & RTN_RTINFO) \
766 goto restart; \
767 } \
768 } \
769 } while (0)
770
771 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
772 struct fib6_table *table,
773 struct flowi6 *fl6, int flags)
774 {
775 struct fib6_node *fn;
776 struct rt6_info *rt;
777
778 read_lock_bh(&table->tb6_lock);
779 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
780 restart:
781 rt = fn->leaf;
782 rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
783 if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
784 rt = rt6_multipath_select(rt, fl6);
785 BACKTRACK(net, &fl6->saddr);
786 out:
787 dst_use(&rt->dst, jiffies);
788 read_unlock_bh(&table->tb6_lock);
789 return rt;
790
791 }
792
793 struct dst_entry * ip6_route_lookup(struct net *net, struct flowi6 *fl6,
794 int flags)
795 {
796 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
797 }
798 EXPORT_SYMBOL_GPL(ip6_route_lookup);
799
800 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
801 const struct in6_addr *saddr, int oif, int strict)
802 {
803 struct flowi6 fl6 = {
804 .flowi6_oif = oif,
805 .daddr = *daddr,
806 };
807 struct dst_entry *dst;
808 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
809
810 if (saddr) {
811 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
812 flags |= RT6_LOOKUP_F_HAS_SADDR;
813 }
814
815 dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
816 if (dst->error == 0)
817 return (struct rt6_info *) dst;
818
819 dst_release(dst);
820
821 return NULL;
822 }
823
824 EXPORT_SYMBOL(rt6_lookup);
825
826 /* ip6_ins_rt is called with FREE table->tb6_lock.
827 It takes a new route entry; if the addition fails for any reason, the
828 route is freed. In any case, if the caller does not hold a reference,
829 it may be destroyed.
830 */
831
832 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
833 {
834 int err;
835 struct fib6_table *table;
836
837 table = rt->rt6i_table;
838 write_lock_bh(&table->tb6_lock);
839 err = fib6_add(&table->tb6_root, rt, info);
840 write_unlock_bh(&table->tb6_lock);
841
842 return err;
843 }
844
845 int ip6_ins_rt(struct rt6_info *rt)
846 {
847 struct nl_info info = {
848 .nl_net = dev_net(rt->dst.dev),
849 };
850 return __ip6_ins_rt(rt, &info);
851 }
852
853 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
854 const struct in6_addr *daddr,
855 const struct in6_addr *saddr)
856 {
857 struct rt6_info *rt;
858
859 /*
860 * Clone the route.
861 */
862
863 rt = ip6_rt_copy(ort, daddr);
864
865 if (rt) {
866 if (!(rt->rt6i_flags & RTF_GATEWAY)) {
867 if (ort->rt6i_dst.plen != 128 &&
868 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
869 rt->rt6i_flags |= RTF_ANYCAST;
870 }
871
872 rt->rt6i_flags |= RTF_CACHE;
873
874 #ifdef CONFIG_IPV6_SUBTREES
875 if (rt->rt6i_src.plen && saddr) {
876 rt->rt6i_src.addr = *saddr;
877 rt->rt6i_src.plen = 128;
878 }
879 #endif
880 }
881
882 return rt;
883 }
884
885 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
886 const struct in6_addr *daddr)
887 {
888 struct rt6_info *rt = ip6_rt_copy(ort, daddr);
889
890 if (rt)
891 rt->rt6i_flags |= RTF_CACHE;
892 return rt;
893 }
894
895 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
896 struct flowi6 *fl6, int flags)
897 {
898 struct fib6_node *fn;
899 struct rt6_info *rt, *nrt;
900 int strict = 0;
901 int attempts = 3;
902 int err;
903 int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
904
905 strict |= flags & RT6_LOOKUP_F_IFACE;
906
907 relookup:
908 read_lock_bh(&table->tb6_lock);
909
910 restart_2:
911 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
912
913 restart:
914 rt = rt6_select(fn, oif, strict | reachable);
915 if (rt->rt6i_nsiblings && oif == 0)
916 rt = rt6_multipath_select(rt, fl6);
917 BACKTRACK(net, &fl6->saddr);
918 if (rt == net->ipv6.ip6_null_entry ||
919 rt->rt6i_flags & RTF_CACHE)
920 goto out;
921
922 dst_hold(&rt->dst);
923 read_unlock_bh(&table->tb6_lock);
924
925 if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)))
926 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
927 else if (!(rt->dst.flags & DST_HOST))
928 nrt = rt6_alloc_clone(rt, &fl6->daddr);
929 else
930 goto out2;
931
932 ip6_rt_put(rt);
933 rt = nrt ? : net->ipv6.ip6_null_entry;
934
935 dst_hold(&rt->dst);
936 if (nrt) {
937 err = ip6_ins_rt(nrt);
938 if (!err)
939 goto out2;
940 }
941
942 if (--attempts <= 0)
943 goto out2;
944
945 /*
946 * Race condition! In the gap while table->tb6_lock was
947 * released, someone could have inserted this route. Relookup.
948 */
949 ip6_rt_put(rt);
950 goto relookup;
951
952 out:
953 if (reachable) {
954 reachable = 0;
955 goto restart_2;
956 }
957 dst_hold(&rt->dst);
958 read_unlock_bh(&table->tb6_lock);
959 out2:
960 rt->dst.lastuse = jiffies;
961 rt->dst.__use++;
962
963 return rt;
964 }
965
966 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
967 struct flowi6 *fl6, int flags)
968 {
969 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
970 }
971
972 static struct dst_entry *ip6_route_input_lookup(struct net *net,
973 struct net_device *dev,
974 struct flowi6 *fl6, int flags)
975 {
976 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
977 flags |= RT6_LOOKUP_F_IFACE;
978
979 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
980 }
981
982 void ip6_route_input(struct sk_buff *skb)
983 {
984 const struct ipv6hdr *iph = ipv6_hdr(skb);
985 struct net *net = dev_net(skb->dev);
986 int flags = RT6_LOOKUP_F_HAS_SADDR;
987 struct flowi6 fl6 = {
988 .flowi6_iif = skb->dev->ifindex,
989 .daddr = iph->daddr,
990 .saddr = iph->saddr,
991 .flowlabel = ip6_flowinfo(iph),
992 .flowi6_mark = skb->mark,
993 .flowi6_proto = iph->nexthdr,
994 };
995
996 skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
997 }
998
999 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
1000 struct flowi6 *fl6, int flags)
1001 {
1002 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
1003 }
1004
1005 struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
1006 struct flowi6 *fl6)
1007 {
1008 int flags = 0;
1009
1010 fl6->flowi6_iif = LOOPBACK_IFINDEX;
1011
1012 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
1013 flags |= RT6_LOOKUP_F_IFACE;
1014
1015 if (!ipv6_addr_any(&fl6->saddr))
1016 flags |= RT6_LOOKUP_F_HAS_SADDR;
1017 else if (sk)
1018 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
1019
1020 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
1021 }
1022
1023 EXPORT_SYMBOL(ip6_route_output);
1024
1025 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
1026 {
1027 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
1028 struct dst_entry *new = NULL;
1029
1030 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
1031 if (rt) {
1032 new = &rt->dst;
1033
1034 memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
1035 rt6_init_peer(rt, net->ipv6.peers);
1036
1037 new->__use = 1;
1038 new->input = dst_discard;
1039 new->output = dst_discard;
1040
1041 if (dst_metrics_read_only(&ort->dst))
1042 new->_metrics = ort->dst._metrics;
1043 else
1044 dst_copy_metrics(new, &ort->dst);
1045 rt->rt6i_idev = ort->rt6i_idev;
1046 if (rt->rt6i_idev)
1047 in6_dev_hold(rt->rt6i_idev);
1048
1049 rt->rt6i_gateway = ort->rt6i_gateway;
1050 rt->rt6i_flags = ort->rt6i_flags;
1051 rt->rt6i_metric = 0;
1052
1053 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1054 #ifdef CONFIG_IPV6_SUBTREES
1055 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1056 #endif
1057
1058 dst_free(new);
1059 }
1060
1061 dst_release(dst_orig);
1062 return new ? new : ERR_PTR(-ENOMEM);
1063 }
1064
1065 /*
1066 * Destination cache support functions
1067 */
1068
1069 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1070 {
1071 struct rt6_info *rt;
1072
1073 rt = (struct rt6_info *) dst;
1074
1075 /* All IPv6 dsts are created with ->obsolete set to the value
1076 * DST_OBSOLETE_FORCE_CHK, which always forces validation calls down
1077 * into this function.
1078 */
1079 if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev)))
1080 return NULL;
1081
1082 if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
1083 return NULL;
1084
1085 if (rt6_check_expired(rt))
1086 return NULL;
1087
1088 return dst;
1089 }
1090
1091 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
1092 {
1093 struct rt6_info *rt = (struct rt6_info *) dst;
1094
1095 if (rt) {
1096 if (rt->rt6i_flags & RTF_CACHE) {
1097 if (rt6_check_expired(rt)) {
1098 ip6_del_rt(rt);
1099 dst = NULL;
1100 }
1101 } else {
1102 dst_release(dst);
1103 dst = NULL;
1104 }
1105 }
1106 return dst;
1107 }
1108
1109 static void ip6_link_failure(struct sk_buff *skb)
1110 {
1111 struct rt6_info *rt;
1112
1113 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
1114
1115 rt = (struct rt6_info *) skb_dst(skb);
1116 if (rt) {
1117 if (rt->rt6i_flags & RTF_CACHE) {
1118 dst_hold(&rt->dst);
1119 if (ip6_del_rt(rt))
1120 dst_free(&rt->dst);
1121 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
1122 rt->rt6i_node->fn_sernum = -1;
1123 }
1124 }
1125 }
1126
1127 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1128 struct sk_buff *skb, u32 mtu)
1129 {
1130 struct rt6_info *rt6 = (struct rt6_info*)dst;
1131
1132 dst_confirm(dst);
1133 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
1134 struct net *net = dev_net(dst->dev);
1135
1136 rt6->rt6i_flags |= RTF_MODIFIED;
1137 if (mtu < IPV6_MIN_MTU)
1138 mtu = IPV6_MIN_MTU;
1139
1140 dst_metric_set(dst, RTAX_MTU, mtu);
1141 rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
1142 }
1143 }
1144
1145 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1146 int oif, u32 mark)
1147 {
1148 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1149 struct dst_entry *dst;
1150 struct flowi6 fl6;
1151
1152 memset(&fl6, 0, sizeof(fl6));
1153 fl6.flowi6_oif = oif;
1154 fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
1155 fl6.flowi6_flags = 0;
1156 fl6.daddr = iph->daddr;
1157 fl6.saddr = iph->saddr;
1158 fl6.flowlabel = ip6_flowinfo(iph);
1159
1160 dst = ip6_route_output(net, NULL, &fl6);
1161 if (!dst->error)
1162 ip6_rt_update_pmtu(dst, NULL, skb, ntohl(mtu));
1163 dst_release(dst);
1164 }
1165 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1166
1167 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1168 {
1169 ip6_update_pmtu(skb, sock_net(sk), mtu,
1170 sk->sk_bound_dev_if, sk->sk_mark);
1171 }
1172 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1173
1174 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1175 {
1176 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1177 struct dst_entry *dst;
1178 struct flowi6 fl6;
1179
1180 memset(&fl6, 0, sizeof(fl6));
1181 fl6.flowi6_oif = oif;
1182 fl6.flowi6_mark = mark;
1183 fl6.flowi6_flags = 0;
1184 fl6.daddr = iph->daddr;
1185 fl6.saddr = iph->saddr;
1186 fl6.flowlabel = ip6_flowinfo(iph);
1187
1188 dst = ip6_route_output(net, NULL, &fl6);
1189 if (!dst->error)
1190 rt6_do_redirect(dst, NULL, skb);
1191 dst_release(dst);
1192 }
1193 EXPORT_SYMBOL_GPL(ip6_redirect);
1194
1195 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1196 {
1197 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
1198 }
1199 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
1200
1201 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1202 {
1203 struct net_device *dev = dst->dev;
1204 unsigned int mtu = dst_mtu(dst);
1205 struct net *net = dev_net(dev);
1206
1207 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1208
1209 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1210 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1211
1212 /*
1213 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
1214 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1215 * IPV6_MAXPLEN is also valid and means: "any MSS,
1216 * rely only on pmtu discovery"
1217 */
1218 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1219 mtu = IPV6_MAXPLEN;
1220 return mtu;
1221 }
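/*
 * Illustrative note (not part of the original file): for a typical Ethernet
 * path MTU of 1500 the advertised MSS computed above is
 *
 *	1500 - sizeof(struct ipv6hdr) - sizeof(struct tcphdr) = 1500 - 40 - 20 = 1440
 *
 * clamped from below by the ip6_rt_min_advmss sysctl and capped at
 * IPV6_MAXPLEN ("any MSS, rely on PMTU discovery") for very large MTUs.
 */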
1222
1223 static unsigned int ip6_mtu(const struct dst_entry *dst)
1224 {
1225 struct inet6_dev *idev;
1226 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
1227
1228 if (mtu)
1229 goto out;
1230
1231 mtu = IPV6_MIN_MTU;
1232
1233 rcu_read_lock();
1234 idev = __in6_dev_get(dst->dev);
1235 if (idev)
1236 mtu = idev->cnf.mtu6;
1237 rcu_read_unlock();
1238
1239 out:
1240 return min_t(unsigned int, mtu, IP6_MAX_MTU);
1241 }
1242
1243 static struct dst_entry *icmp6_dst_gc_list;
1244 static DEFINE_SPINLOCK(icmp6_dst_lock);
1245
1246 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1247 struct flowi6 *fl6)
1248 {
1249 struct dst_entry *dst;
1250 struct rt6_info *rt;
1251 struct inet6_dev *idev = in6_dev_get(dev);
1252 struct net *net = dev_net(dev);
1253
1254 if (unlikely(!idev))
1255 return ERR_PTR(-ENODEV);
1256
1257 rt = ip6_dst_alloc(net, dev, 0, NULL);
1258 if (unlikely(!rt)) {
1259 in6_dev_put(idev);
1260 dst = ERR_PTR(-ENOMEM);
1261 goto out;
1262 }
1263
1264 rt->dst.flags |= DST_HOST;
1265 rt->dst.output = ip6_output;
1266 atomic_set(&rt->dst.__refcnt, 1);
1267 rt->rt6i_gateway = fl6->daddr;
1268 rt->rt6i_dst.addr = fl6->daddr;
1269 rt->rt6i_dst.plen = 128;
1270 rt->rt6i_idev = idev;
1271 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
1272
1273 spin_lock_bh(&icmp6_dst_lock);
1274 rt->dst.next = icmp6_dst_gc_list;
1275 icmp6_dst_gc_list = &rt->dst;
1276 spin_unlock_bh(&icmp6_dst_lock);
1277
1278 fib6_force_start_gc(net);
1279
1280 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
1281
1282 out:
1283 return dst;
1284 }
1285
1286 int icmp6_dst_gc(void)
1287 {
1288 struct dst_entry *dst, **pprev;
1289 int more = 0;
1290
1291 spin_lock_bh(&icmp6_dst_lock);
1292 pprev = &icmp6_dst_gc_list;
1293
1294 while ((dst = *pprev) != NULL) {
1295 if (!atomic_read(&dst->__refcnt)) {
1296 *pprev = dst->next;
1297 dst_free(dst);
1298 } else {
1299 pprev = &dst->next;
1300 ++more;
1301 }
1302 }
1303
1304 spin_unlock_bh(&icmp6_dst_lock);
1305
1306 return more;
1307 }
1308
1309 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1310 void *arg)
1311 {
1312 struct dst_entry *dst, **pprev;
1313
1314 spin_lock_bh(&icmp6_dst_lock);
1315 pprev = &icmp6_dst_gc_list;
1316 while ((dst = *pprev) != NULL) {
1317 struct rt6_info *rt = (struct rt6_info *) dst;
1318 if (func(rt, arg)) {
1319 *pprev = dst->next;
1320 dst_free(dst);
1321 } else {
1322 pprev = &dst->next;
1323 }
1324 }
1325 spin_unlock_bh(&icmp6_dst_lock);
1326 }
1327
1328 static int ip6_dst_gc(struct dst_ops *ops)
1329 {
1330 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1331 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1332 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1333 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1334 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1335 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1336 int entries;
1337
1338 entries = dst_entries_get_fast(ops);
1339 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
1340 entries <= rt_max_size)
1341 goto out;
1342
1343 net->ipv6.ip6_rt_gc_expire++;
1344 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size);
1345 entries = dst_entries_get_slow(ops);
1346 if (entries < ops->gc_thresh)
1347 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1348 out:
1349 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1350 return entries > rt_max_size;
1351 }
1352
1353 int ip6_dst_hoplimit(struct dst_entry *dst)
1354 {
1355 int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
1356 if (hoplimit == 0) {
1357 struct net_device *dev = dst->dev;
1358 struct inet6_dev *idev;
1359
1360 rcu_read_lock();
1361 idev = __in6_dev_get(dev);
1362 if (idev)
1363 hoplimit = idev->cnf.hop_limit;
1364 else
1365 hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
1366 rcu_read_unlock();
1367 }
1368 return hoplimit;
1369 }
1370 EXPORT_SYMBOL(ip6_dst_hoplimit);
1371
1372 /*
1373 *
1374 */
1375
1376 int ip6_route_add(struct fib6_config *cfg)
1377 {
1378 int err;
1379 struct net *net = cfg->fc_nlinfo.nl_net;
1380 struct rt6_info *rt = NULL;
1381 struct net_device *dev = NULL;
1382 struct inet6_dev *idev = NULL;
1383 struct fib6_table *table;
1384 int addr_type;
1385
1386 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1387 return -EINVAL;
1388 #ifndef CONFIG_IPV6_SUBTREES
1389 if (cfg->fc_src_len)
1390 return -EINVAL;
1391 #endif
1392 if (cfg->fc_ifindex) {
1393 err = -ENODEV;
1394 dev = dev_get_by_index(net, cfg->fc_ifindex);
1395 if (!dev)
1396 goto out;
1397 idev = in6_dev_get(dev);
1398 if (!idev)
1399 goto out;
1400 }
1401
1402 if (cfg->fc_metric == 0)
1403 cfg->fc_metric = IP6_RT_PRIO_USER;
1404
1405 err = -ENOBUFS;
1406 if (cfg->fc_nlinfo.nlh &&
1407 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1408 table = fib6_get_table(net, cfg->fc_table);
1409 if (!table) {
1410 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1411 table = fib6_new_table(net, cfg->fc_table);
1412 }
1413 } else {
1414 table = fib6_new_table(net, cfg->fc_table);
1415 }
1416
1417 if (!table)
1418 goto out;
1419
1420 rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
1421
1422 if (!rt) {
1423 err = -ENOMEM;
1424 goto out;
1425 }
1426
1427 if (cfg->fc_flags & RTF_EXPIRES)
1428 rt6_set_expires(rt, jiffies +
1429 clock_t_to_jiffies(cfg->fc_expires));
1430 else
1431 rt6_clean_expires(rt);
1432
1433 if (cfg->fc_protocol == RTPROT_UNSPEC)
1434 cfg->fc_protocol = RTPROT_BOOT;
1435 rt->rt6i_protocol = cfg->fc_protocol;
1436
1437 addr_type = ipv6_addr_type(&cfg->fc_dst);
1438
1439 if (addr_type & IPV6_ADDR_MULTICAST)
1440 rt->dst.input = ip6_mc_input;
1441 else if (cfg->fc_flags & RTF_LOCAL)
1442 rt->dst.input = ip6_input;
1443 else
1444 rt->dst.input = ip6_forward;
1445
1446 rt->dst.output = ip6_output;
1447
1448 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1449 rt->rt6i_dst.plen = cfg->fc_dst_len;
1450 if (rt->rt6i_dst.plen == 128)
1451 rt->dst.flags |= DST_HOST;
1452
1453 if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
1454 u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1455 if (!metrics) {
1456 err = -ENOMEM;
1457 goto out;
1458 }
1459 dst_init_metrics(&rt->dst, metrics, 0);
1460 }
1461 #ifdef CONFIG_IPV6_SUBTREES
1462 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1463 rt->rt6i_src.plen = cfg->fc_src_len;
1464 #endif
1465
1466 rt->rt6i_metric = cfg->fc_metric;
1467
1468 /* We cannot add true routes via loopback here,
1469 they would result in kernel looping; promote them to reject routes
1470 */
1471 if ((cfg->fc_flags & RTF_REJECT) ||
1472 (dev && (dev->flags & IFF_LOOPBACK) &&
1473 !(addr_type & IPV6_ADDR_LOOPBACK) &&
1474 !(cfg->fc_flags & RTF_LOCAL))) {
1475 /* hold loopback dev/idev if we haven't done so. */
1476 if (dev != net->loopback_dev) {
1477 if (dev) {
1478 dev_put(dev);
1479 in6_dev_put(idev);
1480 }
1481 dev = net->loopback_dev;
1482 dev_hold(dev);
1483 idev = in6_dev_get(dev);
1484 if (!idev) {
1485 err = -ENODEV;
1486 goto out;
1487 }
1488 }
1489 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1490 switch (cfg->fc_type) {
1491 case RTN_BLACKHOLE:
1492 rt->dst.error = -EINVAL;
1493 rt->dst.output = dst_discard;
1494 rt->dst.input = dst_discard;
1495 break;
1496 case RTN_PROHIBIT:
1497 rt->dst.error = -EACCES;
1498 rt->dst.output = ip6_pkt_prohibit_out;
1499 rt->dst.input = ip6_pkt_prohibit;
1500 break;
1501 case RTN_THROW:
1502 default:
1503 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1504 : -ENETUNREACH;
1505 rt->dst.output = ip6_pkt_discard_out;
1506 rt->dst.input = ip6_pkt_discard;
1507 break;
1508 }
1509 goto install_route;
1510 }
1511
1512 if (cfg->fc_flags & RTF_GATEWAY) {
1513 const struct in6_addr *gw_addr;
1514 int gwa_type;
1515
1516 gw_addr = &cfg->fc_gateway;
1517 rt->rt6i_gateway = *gw_addr;
1518 gwa_type = ipv6_addr_type(gw_addr);
1519
1520 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1521 struct rt6_info *grt;
1522
1523 /* IPv6 strictly inhibits using non-link-local
1524 addresses as nexthop addresses.
1525 Otherwise, the router will not be able to send redirects.
1526 That is generally a good thing, but in some (rare!) circumstances
1527 (SIT, PtP, NBMA NOARP links) it is handy to allow
1528 some exceptions. --ANK
1529 */
1530 err = -EINVAL;
1531 if (!(gwa_type & IPV6_ADDR_UNICAST))
1532 goto out;
1533
1534 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1535
1536 err = -EHOSTUNREACH;
1537 if (!grt)
1538 goto out;
1539 if (dev) {
1540 if (dev != grt->dst.dev) {
1541 ip6_rt_put(grt);
1542 goto out;
1543 }
1544 } else {
1545 dev = grt->dst.dev;
1546 idev = grt->rt6i_idev;
1547 dev_hold(dev);
1548 in6_dev_hold(grt->rt6i_idev);
1549 }
1550 if (!(grt->rt6i_flags & RTF_GATEWAY))
1551 err = 0;
1552 ip6_rt_put(grt);
1553
1554 if (err)
1555 goto out;
1556 }
1557 err = -EINVAL;
1558 if (!dev || (dev->flags & IFF_LOOPBACK))
1559 goto out;
1560 }
1561
1562 err = -ENODEV;
1563 if (!dev)
1564 goto out;
1565
1566 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1567 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1568 err = -EINVAL;
1569 goto out;
1570 }
1571 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
1572 rt->rt6i_prefsrc.plen = 128;
1573 } else
1574 rt->rt6i_prefsrc.plen = 0;
1575
1576 rt->rt6i_flags = cfg->fc_flags;
1577
1578 install_route:
1579 if (cfg->fc_mx) {
1580 struct nlattr *nla;
1581 int remaining;
1582
1583 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1584 int type = nla_type(nla);
1585
1586 if (type) {
1587 if (type > RTAX_MAX) {
1588 err = -EINVAL;
1589 goto out;
1590 }
1591
1592 dst_metric_set(&rt->dst, type, nla_get_u32(nla));
1593 }
1594 }
1595 }
1596
1597 rt->dst.dev = dev;
1598 rt->rt6i_idev = idev;
1599 rt->rt6i_table = table;
1600
1601 cfg->fc_nlinfo.nl_net = dev_net(dev);
1602
1603 return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1604
1605 out:
1606 if (dev)
1607 dev_put(dev);
1608 if (idev)
1609 in6_dev_put(idev);
1610 if (rt)
1611 dst_free(&rt->dst);
1612 return err;
1613 }
1614
1615 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1616 {
1617 int err;
1618 struct fib6_table *table;
1619 struct net *net = dev_net(rt->dst.dev);
1620
1621 if (rt == net->ipv6.ip6_null_entry) {
1622 err = -ENOENT;
1623 goto out;
1624 }
1625
1626 table = rt->rt6i_table;
1627 write_lock_bh(&table->tb6_lock);
1628 err = fib6_del(rt, info);
1629 write_unlock_bh(&table->tb6_lock);
1630
1631 out:
1632 ip6_rt_put(rt);
1633 return err;
1634 }
1635
1636 int ip6_del_rt(struct rt6_info *rt)
1637 {
1638 struct nl_info info = {
1639 .nl_net = dev_net(rt->dst.dev),
1640 };
1641 return __ip6_del_rt(rt, &info);
1642 }
1643
1644 static int ip6_route_del(struct fib6_config *cfg)
1645 {
1646 struct fib6_table *table;
1647 struct fib6_node *fn;
1648 struct rt6_info *rt;
1649 int err = -ESRCH;
1650
1651 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1652 if (!table)
1653 return err;
1654
1655 read_lock_bh(&table->tb6_lock);
1656
1657 fn = fib6_locate(&table->tb6_root,
1658 &cfg->fc_dst, cfg->fc_dst_len,
1659 &cfg->fc_src, cfg->fc_src_len);
1660
1661 if (fn) {
1662 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1663 if (cfg->fc_ifindex &&
1664 (!rt->dst.dev ||
1665 rt->dst.dev->ifindex != cfg->fc_ifindex))
1666 continue;
1667 if (cfg->fc_flags & RTF_GATEWAY &&
1668 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1669 continue;
1670 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1671 continue;
1672 if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
1673 continue;
1674 dst_hold(&rt->dst);
1675 read_unlock_bh(&table->tb6_lock);
1676
1677 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1678 }
1679 }
1680 read_unlock_bh(&table->tb6_lock);
1681
1682 return err;
1683 }
1684
1685 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
1686 {
1687 struct net *net = dev_net(skb->dev);
1688 struct netevent_redirect netevent;
1689 struct rt6_info *rt, *nrt = NULL;
1690 struct ndisc_options ndopts;
1691 struct inet6_dev *in6_dev;
1692 struct neighbour *neigh;
1693 struct rd_msg *msg;
1694 int optlen, on_link;
1695 u8 *lladdr;
1696
1697 optlen = skb->tail - skb->transport_header;
1698 optlen -= sizeof(*msg);
1699
1700 if (optlen < 0) {
1701 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
1702 return;
1703 }
1704
1705 msg = (struct rd_msg *)icmp6_hdr(skb);
1706
1707 if (ipv6_addr_is_multicast(&msg->dest)) {
1708 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
1709 return;
1710 }
1711
1712 on_link = 0;
1713 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
1714 on_link = 1;
1715 } else if (ipv6_addr_type(&msg->target) !=
1716 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
1717 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
1718 return;
1719 }
1720
1721 in6_dev = __in6_dev_get(skb->dev);
1722 if (!in6_dev)
1723 return;
1724 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
1725 return;
1726
1727 /* RFC2461 8.1:
1728 * The IP source address of the Redirect MUST be the same as the current
1729 * first-hop router for the specified ICMP Destination Address.
1730 */
1731
1732 if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) {
1733 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
1734 return;
1735 }
1736
1737 lladdr = NULL;
1738 if (ndopts.nd_opts_tgt_lladdr) {
1739 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
1740 skb->dev);
1741 if (!lladdr) {
1742 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
1743 return;
1744 }
1745 }
1746
1747 rt = (struct rt6_info *) dst;
1748 if (rt == net->ipv6.ip6_null_entry) {
1749 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
1750 return;
1751 }
1752
1753 /* Redirect received -> path was valid.
1754 * Look, redirects are sent only in response to data packets,
1755 * so this nexthop apparently is reachable. --ANK
1756 */
1757 dst_confirm(&rt->dst);
1758
1759 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
1760 if (!neigh)
1761 return;
1762
1763 /*
1764 * We have finally decided to accept it.
1765 */
1766
1767 neigh_update(neigh, lladdr, NUD_STALE,
1768 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1769 NEIGH_UPDATE_F_OVERRIDE|
1770 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1771 NEIGH_UPDATE_F_ISROUTER))
1772 );
1773
1774 nrt = ip6_rt_copy(rt, &msg->dest);
1775 if (!nrt)
1776 goto out;
1777
1778 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1779 if (on_link)
1780 nrt->rt6i_flags &= ~RTF_GATEWAY;
1781
1782 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
1783
1784 if (ip6_ins_rt(nrt))
1785 goto out;
1786
1787 netevent.old = &rt->dst;
1788 netevent.new = &nrt->dst;
1789 netevent.daddr = &msg->dest;
1790 netevent.neigh = neigh;
1791 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1792
1793 if (rt->rt6i_flags & RTF_CACHE) {
1794 rt = (struct rt6_info *) dst_clone(&rt->dst);
1795 ip6_del_rt(rt);
1796 }
1797
1798 out:
1799 neigh_release(neigh);
1800 }
1801
1802 /*
1803 * Misc support functions
1804 */
1805
1806 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1807 const struct in6_addr *dest)
1808 {
1809 struct net *net = dev_net(ort->dst.dev);
1810 struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
1811 ort->rt6i_table);
1812
1813 if (rt) {
1814 rt->dst.input = ort->dst.input;
1815 rt->dst.output = ort->dst.output;
1816 rt->dst.flags |= DST_HOST;
1817
1818 rt->rt6i_dst.addr = *dest;
1819 rt->rt6i_dst.plen = 128;
1820 dst_copy_metrics(&rt->dst, &ort->dst);
1821 rt->dst.error = ort->dst.error;
1822 rt->rt6i_idev = ort->rt6i_idev;
1823 if (rt->rt6i_idev)
1824 in6_dev_hold(rt->rt6i_idev);
1825 rt->dst.lastuse = jiffies;
1826
1827 if (ort->rt6i_flags & RTF_GATEWAY)
1828 rt->rt6i_gateway = ort->rt6i_gateway;
1829 else
1830 rt->rt6i_gateway = *dest;
1831 rt->rt6i_flags = ort->rt6i_flags;
1832 rt6_set_from(rt, ort);
1833 rt->rt6i_metric = 0;
1834
1835 #ifdef CONFIG_IPV6_SUBTREES
1836 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1837 #endif
1838 memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
1839 rt->rt6i_table = ort->rt6i_table;
1840 }
1841 return rt;
1842 }
1843
1844 #ifdef CONFIG_IPV6_ROUTE_INFO
1845 static struct rt6_info *rt6_get_route_info(struct net_device *dev,
1846 const struct in6_addr *prefix, int prefixlen,
1847 const struct in6_addr *gwaddr)
1848 {
1849 struct fib6_node *fn;
1850 struct rt6_info *rt = NULL;
1851 struct fib6_table *table;
1852
1853 table = fib6_get_table(dev_net(dev),
1854 addrconf_rt_table(dev, RT6_TABLE_INFO));
1855 if (!table)
1856 return NULL;
1857
1858 read_lock_bh(&table->tb6_lock);
1859 fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
1860 if (!fn)
1861 goto out;
1862
1863 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1864 if (rt->dst.dev->ifindex != dev->ifindex)
1865 continue;
1866 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1867 continue;
1868 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1869 continue;
1870 dst_hold(&rt->dst);
1871 break;
1872 }
1873 out:
1874 read_unlock_bh(&table->tb6_lock);
1875 return rt;
1876 }
1877
1878 static struct rt6_info *rt6_add_route_info(struct net_device *dev,
1879 const struct in6_addr *prefix, int prefixlen,
1880 const struct in6_addr *gwaddr, unsigned int pref)
1881 {
1882 struct fib6_config cfg = {
1883 .fc_table = addrconf_rt_table(dev, RT6_TABLE_INFO),
1884 .fc_metric = IP6_RT_PRIO_USER,
1885 .fc_ifindex = dev->ifindex,
1886 .fc_dst_len = prefixlen,
1887 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1888 RTF_UP | RTF_PREF(pref),
1889 .fc_nlinfo.portid = 0,
1890 .fc_nlinfo.nlh = NULL,
1891 .fc_nlinfo.nl_net = dev_net(dev),
1892 };
1893
1894 cfg.fc_dst = *prefix;
1895 cfg.fc_gateway = *gwaddr;
1896
1897 /* We should treat it as a default route if prefix length is 0. */
1898 if (!prefixlen)
1899 cfg.fc_flags |= RTF_DEFAULT;
1900
1901 ip6_route_add(&cfg);
1902
1903 return rt6_get_route_info(dev, prefix, prefixlen, gwaddr);
1904 }
1905 #endif
1906
1907 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
1908 {
1909 struct rt6_info *rt;
1910 struct fib6_table *table;
1911
1912 table = fib6_get_table(dev_net(dev),
1913 addrconf_rt_table(dev, RT6_TABLE_MAIN));
1914 if (!table)
1915 return NULL;
1916
1917 read_lock_bh(&table->tb6_lock);
1918 for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
1919 if (dev == rt->dst.dev &&
1920 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1921 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1922 break;
1923 }
1924 if (rt)
1925 dst_hold(&rt->dst);
1926 read_unlock_bh(&table->tb6_lock);
1927 return rt;
1928 }
1929
1930 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
1931 struct net_device *dev,
1932 unsigned int pref)
1933 {
1934 struct fib6_config cfg = {
1935 .fc_table = addrconf_rt_table(dev, RT6_TABLE_DFLT),
1936 .fc_metric = IP6_RT_PRIO_USER,
1937 .fc_ifindex = dev->ifindex,
1938 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1939 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1940 .fc_nlinfo.portid = 0,
1941 .fc_nlinfo.nlh = NULL,
1942 .fc_nlinfo.nl_net = dev_net(dev),
1943 };
1944
1945 cfg.fc_gateway = *gwaddr;
1946
1947 ip6_route_add(&cfg);
1948
1949 return rt6_get_dflt_router(gwaddr, dev);
1950 }
1951
1952
1953 int rt6_addrconf_purge(struct rt6_info *rt, void *arg) {
1954 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
1955 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2))
1956 return -1;
1957 return 0;
1958 }
1959
1960 void rt6_purge_dflt_routers(struct net *net)
1961 {
1962 fib6_clean_all(net, rt6_addrconf_purge, 0, NULL);
1963 }
1964
1965 static void rtmsg_to_fib6_config(struct net *net,
1966 struct in6_rtmsg *rtmsg,
1967 struct fib6_config *cfg)
1968 {
1969 memset(cfg, 0, sizeof(*cfg));
1970
1971 cfg->fc_table = RT6_TABLE_MAIN;
1972 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
1973 cfg->fc_metric = rtmsg->rtmsg_metric;
1974 cfg->fc_expires = rtmsg->rtmsg_info;
1975 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
1976 cfg->fc_src_len = rtmsg->rtmsg_src_len;
1977 cfg->fc_flags = rtmsg->rtmsg_flags;
1978
1979 cfg->fc_nlinfo.nl_net = net;
1980
1981 cfg->fc_dst = rtmsg->rtmsg_dst;
1982 cfg->fc_src = rtmsg->rtmsg_src;
1983 cfg->fc_gateway = rtmsg->rtmsg_gateway;
1984 }
1985
1986 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1987 {
1988 struct fib6_config cfg;
1989 struct in6_rtmsg rtmsg;
1990 int err;
1991
1992 switch(cmd) {
1993 case SIOCADDRT: /* Add a route */
1994 case SIOCDELRT: /* Delete a route */
1995 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1996 return -EPERM;
1997 err = copy_from_user(&rtmsg, arg,
1998 sizeof(struct in6_rtmsg));
1999 if (err)
2000 return -EFAULT;
2001
2002 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
2003
2004 rtnl_lock();
2005 switch (cmd) {
2006 case SIOCADDRT:
2007 err = ip6_route_add(&cfg);
2008 break;
2009 case SIOCDELRT:
2010 err = ip6_route_del(&cfg);
2011 break;
2012 default:
2013 err = -EINVAL;
2014 }
2015 rtnl_unlock();
2016
2017 return err;
2018 }
2019
2020 return -EINVAL;
2021 }
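/*
 * Usage sketch (not part of the original file): the legacy ioctl interface
 * above is what the classic 'route' utility uses.  A minimal userspace
 * caller might look like the following; error handling is omitted and the
 * interface name, prefix and gateway are illustrative only.
 *
 *	#include <sys/socket.h>
 *	#include <sys/ioctl.h>
 *	#include <arpa/inet.h>
 *	#include <net/if.h>
 *	#include <linux/ipv6_route.h>
 *
 *	struct in6_rtmsg rtm = { 0 };
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	inet_pton(AF_INET6, "2001:db8::", &rtm.rtmsg_dst);
 *	rtm.rtmsg_dst_len = 64;
 *	inet_pton(AF_INET6, "fe80::1", &rtm.rtmsg_gateway);
 *	rtm.rtmsg_flags   = RTF_UP | RTF_GATEWAY;
 *	rtm.rtmsg_metric  = 1024;
 *	rtm.rtmsg_ifindex = if_nametoindex("eth0");
 *	ioctl(fd, SIOCADDRT, &rtm);	// SIOCDELRT removes the same route
 */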
2022
2023 /*
2024 * Drop the packet on the floor
2025 */
2026
2027 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
2028 {
2029 int type;
2030 struct dst_entry *dst = skb_dst(skb);
2031 switch (ipstats_mib_noroutes) {
2032 case IPSTATS_MIB_INNOROUTES:
2033 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2034 if (type == IPV6_ADDR_ANY) {
2035 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2036 IPSTATS_MIB_INADDRERRORS);
2037 break;
2038 }
2039 /* FALLTHROUGH */
2040 case IPSTATS_MIB_OUTNOROUTES:
2041 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2042 ipstats_mib_noroutes);
2043 break;
2044 }
2045 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2046 kfree_skb(skb);
2047 return 0;
2048 }
2049
2050 static int ip6_pkt_discard(struct sk_buff *skb)
2051 {
2052 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2053 }
2054
2055 static int ip6_pkt_discard_out(struct sk_buff *skb)
2056 {
2057 skb->dev = skb_dst(skb)->dev;
2058 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2059 }
2060
2061 static int ip6_pkt_prohibit(struct sk_buff *skb)
2062 {
2063 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2064 }
2065
2066 static int ip6_pkt_prohibit_out(struct sk_buff *skb)
2067 {
2068 skb->dev = skb_dst(skb)->dev;
2069 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2070 }
2071
2072 /*
2073 * Allocate a dst for local (unicast / anycast) address.
2074 */
2075
2076 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2077 const struct in6_addr *addr,
2078 bool anycast)
2079 {
2080 struct net *net = dev_net(idev->dev);
2081 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
2082 DST_NOCOUNT, NULL);
2083 if (!rt)
2084 return ERR_PTR(-ENOMEM);
2085
2086 in6_dev_hold(idev);
2087
2088 rt->dst.flags |= DST_HOST;
2089 rt->dst.input = ip6_input;
2090 rt->dst.output = ip6_output;
2091 rt->rt6i_idev = idev;
2092
2093 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2094 if (anycast)
2095 rt->rt6i_flags |= RTF_ANYCAST;
2096 else
2097 rt->rt6i_flags |= RTF_LOCAL;
2098
2099 rt->rt6i_gateway = *addr;
2100 rt->rt6i_dst.addr = *addr;
2101 rt->rt6i_dst.plen = 128;
2102 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
2103
2104 atomic_set(&rt->dst.__refcnt, 1);
2105
2106 return rt;
2107 }
2108
2109 int ip6_route_get_saddr(struct net *net,
2110 struct rt6_info *rt,
2111 const struct in6_addr *daddr,
2112 unsigned int prefs,
2113 struct in6_addr *saddr)
2114 {
2115 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
2116 int err = 0;
2117 if (rt->rt6i_prefsrc.plen)
2118 *saddr = rt->rt6i_prefsrc.addr;
2119 else
2120 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2121 daddr, prefs, saddr);
2122 return err;
2123 }
2124
2125 /* remove deleted ip from prefsrc entries */
2126 struct arg_dev_net_ip {
2127 struct net_device *dev;
2128 struct net *net;
2129 struct in6_addr *addr;
2130 };
2131
2132 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2133 {
2134 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2135 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2136 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2137
2138 if (((void *)rt->dst.dev == dev || !dev) &&
2139 rt != net->ipv6.ip6_null_entry &&
2140 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2141 /* remove prefsrc entry */
2142 rt->rt6i_prefsrc.plen = 0;
2143 }
2144 return 0;
2145 }
2146
2147 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2148 {
2149 struct net *net = dev_net(ifp->idev->dev);
2150 struct arg_dev_net_ip adni = {
2151 .dev = ifp->idev->dev,
2152 .net = net,
2153 .addr = &ifp->addr,
2154 };
2155 fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
2156 }
2157
2158 struct arg_dev_net {
2159 struct net_device *dev;
2160 struct net *net;
2161 };
2162
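/*
 * fib6_clean_all()/icmp6_clean_all() callback: mark for deletion every
 * route that uses the device being taken down (or every route when dev
 * is NULL), except the namespace's null entry.
 */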
2163 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2164 {
2165 const struct arg_dev_net *adn = arg;
2166 const struct net_device *dev = adn->dev;
2167
2168 if ((rt->dst.dev == dev || !dev) &&
2169 rt != adn->net->ipv6.ip6_null_entry)
2170 return -1;
2171
2172 return 0;
2173 }
2174
2175 void rt6_ifdown(struct net *net, struct net_device *dev)
2176 {
2177 struct arg_dev_net adn = {
2178 .dev = dev,
2179 .net = net,
2180 };
2181
2182 fib6_clean_all(net, fib6_ifdown, 0, &adn);
2183 icmp6_clean_all(fib6_ifdown, &adn);
2184 }
2185
2186 struct rt6_mtu_change_arg {
2187 struct net_device *dev;
2188 unsigned int mtu;
2189 };
2190
2191 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2192 {
2193 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2194 struct inet6_dev *idev;
2195
2196 /* In IPv6, PMTU discovery is not optional,
2197 so the RTAX_MTU lock cannot disable it.
2196 /* In IPv6, PMTU discovery is not optional,
2197 so the RTAX_MTU lock cannot disable it.
2198 We still use this lock to block changes
2199 caused by addrconf/ndisc.
2200 */
2201
2202 idev = __in6_dev_get(arg->dev);
2203 if (!idev)
2204 return 0;
2205
2206 /* For an administrative MTU increase, there is no way to discover
2207 an IPv6 PMTU increase, so the PMTU increase must be applied here.
2208 Since RFC 1981 does not cover administrative MTU increases,
2209 updating the PMTU on such an increase is a MUST (e.g. jumbo frames).
2210 */
2211 /*
2212 If the new MTU is less than the route PMTU, the new MTU will be the
2213 lowest MTU in the path; update the route PMTU to reflect the
2214 decrease. If the new MTU is greater than the route PMTU, and the
2215 old MTU was the lowest MTU in the path, update the route PMTU to
2216 reflect the increase. In that case, if another node on the path
2217 still has the lowest MTU, a Packet Too Big message will trigger
2218 PMTU discovery again.
2219 */
2220 if (rt->dst.dev == arg->dev &&
2221 !dst_metric_locked(&rt->dst, RTAX_MTU) &&
2222 (dst_mtu(&rt->dst) >= arg->mtu ||
2223 (dst_mtu(&rt->dst) < arg->mtu &&
2224 dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
2225 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2226 }
2227 return 0;
2228 }
2229
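/*
 * Called when a device's MTU changes: walk the FIB and update the
 * cached MTU of every route that depends on that device.
 */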
2230 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2231 {
2232 struct rt6_mtu_change_arg arg = {
2233 .dev = dev,
2234 .mtu = mtu,
2235 };
2236
2237 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
2238 }
2239
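/* Netlink attribute policy for IPv6 RTM_NEWROUTE/RTM_DELROUTE/RTM_GETROUTE. */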
2240 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2241 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2242 [RTA_OIF] = { .type = NLA_U32 },
2243 [RTA_IIF] = { .type = NLA_U32 },
2244 [RTA_PRIORITY] = { .type = NLA_U32 },
2245 [RTA_METRICS] = { .type = NLA_NESTED },
2246 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
2247 [RTA_UID] = { .type = NLA_U32 },
2248 };
2249
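/*
 * Translate an rtnetlink route message and its attributes into a
 * struct fib6_config understood by ip6_route_add()/ip6_route_del().
 */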
2250 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2251 struct fib6_config *cfg)
2252 {
2253 struct rtmsg *rtm;
2254 struct nlattr *tb[RTA_MAX+1];
2255 int err;
2256
2257 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2258 if (err < 0)
2259 goto errout;
2260
2261 err = -EINVAL;
2262 rtm = nlmsg_data(nlh);
2263 memset(cfg, 0, sizeof(*cfg));
2264
2265 cfg->fc_table = rtm->rtm_table;
2266 cfg->fc_dst_len = rtm->rtm_dst_len;
2267 cfg->fc_src_len = rtm->rtm_src_len;
2268 cfg->fc_flags = RTF_UP;
2269 cfg->fc_protocol = rtm->rtm_protocol;
2270 cfg->fc_type = rtm->rtm_type;
2271
2272 if (rtm->rtm_type == RTN_UNREACHABLE ||
2273 rtm->rtm_type == RTN_BLACKHOLE ||
2274 rtm->rtm_type == RTN_PROHIBIT ||
2275 rtm->rtm_type == RTN_THROW)
2276 cfg->fc_flags |= RTF_REJECT;
2277
2278 if (rtm->rtm_type == RTN_LOCAL)
2279 cfg->fc_flags |= RTF_LOCAL;
2280
2281 cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
2282 cfg->fc_nlinfo.nlh = nlh;
2283 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2284
2285 if (tb[RTA_GATEWAY]) {
2286 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
2287 cfg->fc_flags |= RTF_GATEWAY;
2288 }
2289
2290 if (tb[RTA_DST]) {
2291 int plen = (rtm->rtm_dst_len + 7) >> 3;
2292
2293 if (nla_len(tb[RTA_DST]) < plen)
2294 goto errout;
2295
2296 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2297 }
2298
2299 if (tb[RTA_SRC]) {
2300 int plen = (rtm->rtm_src_len + 7) >> 3;
2301
2302 if (nla_len(tb[RTA_SRC]) < plen)
2303 goto errout;
2304
2305 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2306 }
2307
2308 if (tb[RTA_PREFSRC])
2309 nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
2310
2311 if (tb[RTA_OIF])
2312 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2313
2314 if (tb[RTA_PRIORITY])
2315 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2316
2317 if (tb[RTA_METRICS]) {
2318 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2319 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2320 }
2321
2322 if (tb[RTA_TABLE])
2323 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2324
2325 if (tb[RTA_MULTIPATH]) {
2326 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2327 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2328 }
2329
2330 err = 0;
2331 errout:
2332 return err;
2333 }
2334
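/*
 * Add or delete one route per nexthop of an RTA_MULTIPATH attribute.
 * If an add fails, the nexthops already added are rolled back by
 * re-running the loop in delete mode.
 */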
2335 static int ip6_route_multipath(struct fib6_config *cfg, int add)
2336 {
2337 struct fib6_config r_cfg;
2338 struct rtnexthop *rtnh;
2339 int remaining;
2340 int attrlen;
2341 int err = 0, last_err = 0;
2342
2343 beginning:
2344 rtnh = (struct rtnexthop *)cfg->fc_mp;
2345 remaining = cfg->fc_mp_len;
2346
2347 /* Parse a Multipath Entry */
2348 while (rtnh_ok(rtnh, remaining)) {
2349 memcpy(&r_cfg, cfg, sizeof(*cfg));
2350 if (rtnh->rtnh_ifindex)
2351 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
2352
2353 attrlen = rtnh_attrlen(rtnh);
2354 if (attrlen > 0) {
2355 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
2356
2357 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2358 if (nla) {
2359 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
2360 r_cfg.fc_flags |= RTF_GATEWAY;
2361 }
2362 }
2363 err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
2364 if (err) {
2365 last_err = err;
2366 /* If we are trying to remove a route, do not stop the
2367 * loop when ip6_route_del() fails (because the next hop is
2368 * already gone); keep going and try to remove all next hops.
2369 */
2370 if (add) {
2371 /* If add fails, we should try to delete all
2372 * next hops that have been already added.
2373 */
2374 add = 0;
2375 goto beginning;
2376 }
2377 }
2378 /* Because each route is added as a single route, we remove
2379 * this flag after the first nexthop (if there is a collision,
2380 * we have already failed to add the first nexthop:
2381 * fib6_add_rt2node() has rejected it).
2382 */
2383 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL;
2384 rtnh = rtnh_next(rtnh, &remaining);
2385 }
2386
2387 return last_err;
2388 }
2389
2390 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh)
2391 {
2392 struct fib6_config cfg;
2393 int err;
2394
2395 err = rtm_to_fib6_config(skb, nlh, &cfg);
2396 if (err < 0)
2397 return err;
2398
2399 if (cfg.fc_mp)
2400 return ip6_route_multipath(&cfg, 0);
2401 else
2402 return ip6_route_del(&cfg);
2403 }
2404
2405 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh)
2406 {
2407 struct fib6_config cfg;
2408 int err;
2409
2410 err = rtm_to_fib6_config(skb, nlh, &cfg);
2411 if (err < 0)
2412 return err;
2413
2414 if (cfg.fc_mp)
2415 return ip6_route_multipath(&cfg, 1);
2416 else
2417 return ip6_route_add(&cfg);
2418 }
2419
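/* Worst-case netlink message size for a single IPv6 route entry. */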
2420 static inline size_t rt6_nlmsg_size(void)
2421 {
2422 return NLMSG_ALIGN(sizeof(struct rtmsg))
2423 + nla_total_size(16) /* RTA_SRC */
2424 + nla_total_size(16) /* RTA_DST */
2425 + nla_total_size(16) /* RTA_GATEWAY */
2426 + nla_total_size(16) /* RTA_PREFSRC */
2427 + nla_total_size(4) /* RTA_TABLE */
2428 + nla_total_size(4) /* RTA_IIF */
2429 + nla_total_size(4) /* RTA_OIF */
2430 + nla_total_size(4) /* RTA_PRIORITY */
2431 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2432 + nla_total_size(sizeof(struct rta_cacheinfo));
2433 }
2434
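/*
 * Fill one rtnetlink route message describing rt into skb.  Returns the
 * total message length, 1 if the route is filtered out by a prefix-only
 * dump, or -EMSGSIZE when the skb runs out of room.
 */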
2435 static int rt6_fill_node(struct net *net,
2436 struct sk_buff *skb, struct rt6_info *rt,
2437 struct in6_addr *dst, struct in6_addr *src,
2438 int iif, int type, u32 portid, u32 seq,
2439 int prefix, int nowait, unsigned int flags)
2440 {
2441 struct rtmsg *rtm;
2442 struct nlmsghdr *nlh;
2443 long expires;
2444 u32 table;
2445
2446 if (prefix) { /* user wants prefix routes only */
2447 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2448 /* success since this is not a prefix route */
2449 return 1;
2450 }
2451 }
2452
2453 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
2454 if (!nlh)
2455 return -EMSGSIZE;
2456
2457 rtm = nlmsg_data(nlh);
2458 rtm->rtm_family = AF_INET6;
2459 rtm->rtm_dst_len = rt->rt6i_dst.plen;
2460 rtm->rtm_src_len = rt->rt6i_src.plen;
2461 rtm->rtm_tos = 0;
2462 if (rt->rt6i_table)
2463 table = rt->rt6i_table->tb6_id;
2464 else
2465 table = RT6_TABLE_UNSPEC;
2466 rtm->rtm_table = table;
2467 if (nla_put_u32(skb, RTA_TABLE, table))
2468 goto nla_put_failure;
2469 if (rt->rt6i_flags & RTF_REJECT) {
2470 switch (rt->dst.error) {
2471 case -EINVAL:
2472 rtm->rtm_type = RTN_BLACKHOLE;
2473 break;
2474 case -EACCES:
2475 rtm->rtm_type = RTN_PROHIBIT;
2476 break;
2477 case -EAGAIN:
2478 rtm->rtm_type = RTN_THROW;
2479 break;
2480 default:
2481 rtm->rtm_type = RTN_UNREACHABLE;
2482 break;
2483 }
2484 }
2485 else if (rt->rt6i_flags & RTF_LOCAL)
2486 rtm->rtm_type = RTN_LOCAL;
2487 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
2488 rtm->rtm_type = RTN_LOCAL;
2489 else
2490 rtm->rtm_type = RTN_UNICAST;
2491 rtm->rtm_flags = 0;
2492 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2493 rtm->rtm_protocol = rt->rt6i_protocol;
2494 if (rt->rt6i_flags & RTF_DYNAMIC)
2495 rtm->rtm_protocol = RTPROT_REDIRECT;
2496 else if (rt->rt6i_flags & RTF_ADDRCONF) {
2497 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
2498 rtm->rtm_protocol = RTPROT_RA;
2499 else
2500 rtm->rtm_protocol = RTPROT_KERNEL;
2501 }
2502
2503 if (rt->rt6i_flags & RTF_CACHE)
2504 rtm->rtm_flags |= RTM_F_CLONED;
2505
2506 if (dst) {
2507 if (nla_put(skb, RTA_DST, 16, dst))
2508 goto nla_put_failure;
2509 rtm->rtm_dst_len = 128;
2510 } else if (rtm->rtm_dst_len)
2511 if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr))
2512 goto nla_put_failure;
2513 #ifdef CONFIG_IPV6_SUBTREES
2514 if (src) {
2515 if (nla_put(skb, RTA_SRC, 16, src))
2516 goto nla_put_failure;
2517 rtm->rtm_src_len = 128;
2518 } else if (rtm->rtm_src_len &&
2519 nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr))
2520 goto nla_put_failure;
2521 #endif
2522 if (iif) {
2523 #ifdef CONFIG_IPV6_MROUTE
2524 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2525 int err = ip6mr_get_route(net, skb, rtm, nowait,
2526 portid);
2527
2528 if (err <= 0) {
2529 if (!nowait) {
2530 if (err == 0)
2531 return 0;
2532 goto nla_put_failure;
2533 } else {
2534 if (err == -EMSGSIZE)
2535 goto nla_put_failure;
2536 }
2537 }
2538 } else
2539 #endif
2540 if (nla_put_u32(skb, RTA_IIF, iif))
2541 goto nla_put_failure;
2542 } else if (dst) {
2543 struct in6_addr saddr_buf;
2544 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
2545 nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2546 goto nla_put_failure;
2547 }
2548
2549 if (rt->rt6i_prefsrc.plen) {
2550 struct in6_addr saddr_buf;
2551 saddr_buf = rt->rt6i_prefsrc.addr;
2552 if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2553 goto nla_put_failure;
2554 }
2555
2556 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2557 goto nla_put_failure;
2558
2559 if (rt->rt6i_flags & RTF_GATEWAY) {
2560 if (nla_put(skb, RTA_GATEWAY, 16, &rt->rt6i_gateway) < 0)
2561 goto nla_put_failure;
2562 }
2563
2564 if (rt->dst.dev &&
2565 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2566 goto nla_put_failure;
2567 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
2568 goto nla_put_failure;
2569
2570 expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
2571
2572 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
2573 goto nla_put_failure;
2574
2575 return nlmsg_end(skb, nlh);
2576
2577 nla_put_failure:
2578 nlmsg_cancel(skb, nlh);
2579 return -EMSGSIZE;
2580 }
2581
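/*
 * FIB dump callback: emit one RTM_NEWROUTE message per route, honouring
 * the RTM_F_PREFIX filter when the request asked for prefix routes only.
 */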
2582 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2583 {
2584 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2585 int prefix;
2586
2587 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2588 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2589 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2590 } else
2591 prefix = 0;
2592
2593 return rt6_fill_node(arg->net,
2594 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2595 NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
2596 prefix, 0, NLM_F_MULTI);
2597 }
2598
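/*
 * RTM_GETROUTE handler: perform a route lookup for the given flow
 * (an input lookup when RTA_IIF is set, an output lookup otherwise)
 * and return the result to the requester as an RTM_NEWROUTE message.
 */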
2599 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
2600 {
2601 struct net *net = sock_net(in_skb->sk);
2602 struct nlattr *tb[RTA_MAX+1];
2603 struct rt6_info *rt;
2604 struct sk_buff *skb;
2605 struct rtmsg *rtm;
2606 struct flowi6 fl6;
2607 int err, iif = 0, oif = 0;
2608
2609 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2610 if (err < 0)
2611 goto errout;
2612
2613 err = -EINVAL;
2614 memset(&fl6, 0, sizeof(fl6));
2615
2616 if (tb[RTA_SRC]) {
2617 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2618 goto errout;
2619
2620 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
2621 }
2622
2623 if (tb[RTA_DST]) {
2624 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2625 goto errout;
2626
2627 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
2628 }
2629
2630 if (tb[RTA_IIF])
2631 iif = nla_get_u32(tb[RTA_IIF]);
2632
2633 if (tb[RTA_OIF])
2634 oif = nla_get_u32(tb[RTA_OIF]);
2635
2636 if (tb[RTA_MARK])
2637 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
2638
2639 if (tb[RTA_UID])
2640 fl6.flowi6_uid = make_kuid(current_user_ns(),
2641 nla_get_u32(tb[RTA_UID]));
2642 else
2643 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
2644
2645 if (iif) {
2646 struct net_device *dev;
2647 int flags = 0;
2648
2649 dev = __dev_get_by_index(net, iif);
2650 if (!dev) {
2651 err = -ENODEV;
2652 goto errout;
2653 }
2654
2655 fl6.flowi6_iif = iif;
2656
2657 if (!ipv6_addr_any(&fl6.saddr))
2658 flags |= RT6_LOOKUP_F_HAS_SADDR;
2659
2660 rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
2661 flags);
2662 } else {
2663 fl6.flowi6_oif = oif;
2664
2665 rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
2666 }
2667
2668 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2669 if (!skb) {
2670 ip6_rt_put(rt);
2671 err = -ENOBUFS;
2672 goto errout;
2673 }
2674
2675 /* Reserve room for dummy headers; this skb can pass
2676 through a good chunk of the routing engine.
2677 */
2678 skb_reset_mac_header(skb);
2679 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2680
2681 skb_dst_set(skb, &rt->dst);
2682
2683 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
2684 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
2685 nlh->nlmsg_seq, 0, 0, 0);
2686 if (err < 0) {
2687 kfree_skb(skb);
2688 goto errout;
2689 }
2690
2691 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2692 errout:
2693 return err;
2694 }
2695
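/* Notify RTNLGRP_IPV6_ROUTE listeners about a route change. */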
2696 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2697 {
2698 struct sk_buff *skb;
2699 struct net *net = info->nl_net;
2700 u32 seq;
2701 int err;
2702
2703 err = -ENOBUFS;
2704 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
2705
2706 skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2707 if (!skb)
2708 goto errout;
2709
2710 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
2711 event, info->portid, seq, 0, 0, 0);
2712 if (err < 0) {
2713 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2714 WARN_ON(err == -EMSGSIZE);
2715 kfree_skb(skb);
2716 goto errout;
2717 }
2718 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
2719 info->nlh, gfp_any());
2720 return;
2721 errout:
2722 if (err < 0)
2723 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2724 }
2725
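/*
 * Netdevice notifier: when the per-namespace loopback device is
 * registered, point the null (and, with multiple tables, the prohibit
 * and blackhole) route templates at it.
 */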
2726 static int ip6_route_dev_notify(struct notifier_block *this,
2727 unsigned long event, void *data)
2728 {
2729 struct net_device *dev = (struct net_device *)data;
2730 struct net *net = dev_net(dev);
2731
2732 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2733 net->ipv6.ip6_null_entry->dst.dev = dev;
2734 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2735 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2736 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
2737 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2738 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
2739 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2740 #endif
2741 }
2742
2743 return NOTIFY_OK;
2744 }
2745
2746 /*
2747 * /proc
2748 */
2749
2750 #ifdef CONFIG_PROC_FS
2751
2752 struct rt6_proc_arg
2753 {
2754 char *buffer;
2755 int offset;
2756 int length;
2757 int skip;
2758 int len;
2759 };
2760
2761 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2762 {
2763 struct seq_file *m = p_arg;
2764
2765 seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
2766
2767 #ifdef CONFIG_IPV6_SUBTREES
2768 seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
2769 #else
2770 seq_puts(m, "00000000000000000000000000000000 00 ");
2771 #endif
2772 if (rt->rt6i_flags & RTF_GATEWAY) {
2773 seq_printf(m, "%pi6", &rt->rt6i_gateway);
2774 } else {
2775 seq_puts(m, "00000000000000000000000000000000");
2776 }
2777 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2778 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
2779 rt->dst.__use, rt->rt6i_flags,
2780 rt->dst.dev ? rt->dst.dev->name : "");
2781 return 0;
2782 }
2783
2784 static int ipv6_route_show(struct seq_file *m, void *v)
2785 {
2786 struct net *net = (struct net *)m->private;
2787 fib6_clean_all_ro(net, rt6_info_route, 0, m);
2788 return 0;
2789 }
2790
2791 static int ipv6_route_open(struct inode *inode, struct file *file)
2792 {
2793 return single_open_net(inode, file, ipv6_route_show);
2794 }
2795
2796 static const struct file_operations ipv6_route_proc_fops = {
2797 .owner = THIS_MODULE,
2798 .open = ipv6_route_open,
2799 .read = seq_read,
2800 .llseek = seq_lseek,
2801 .release = single_release_net,
2802 };
2803
2804 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2805 {
2806 struct net *net = (struct net *)seq->private;
2807 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2808 net->ipv6.rt6_stats->fib_nodes,
2809 net->ipv6.rt6_stats->fib_route_nodes,
2810 net->ipv6.rt6_stats->fib_rt_alloc,
2811 net->ipv6.rt6_stats->fib_rt_entries,
2812 net->ipv6.rt6_stats->fib_rt_cache,
2813 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
2814 net->ipv6.rt6_stats->fib_discarded_routes);
2815
2816 return 0;
2817 }
2818
2819 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2820 {
2821 return single_open_net(inode, file, rt6_stats_seq_show);
2822 }
2823
2824 static const struct file_operations rt6_stats_seq_fops = {
2825 .owner = THIS_MODULE,
2826 .open = rt6_stats_seq_open,
2827 .read = seq_read,
2828 .llseek = seq_lseek,
2829 .release = single_release_net,
2830 };
2831 #endif /* CONFIG_PROC_FS */
2832
2833 #ifdef CONFIG_SYSCTL
2834
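/*
 * Handler for the "flush" sysctl: any write triggers a garbage-collection
 * pass over the FIB.
 */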
2835 static
2836 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
2837 void __user *buffer, size_t *lenp, loff_t *ppos)
2838 {
2839 struct net *net;
2840 int delay;
2841 if (!write)
2842 return -EINVAL;
2843
2844 net = (struct net *)ctl->extra1;
2845 delay = net->ipv6.sysctl.flush_delay;
2846 proc_dointvec(ctl, write, buffer, lenp, ppos);
2847 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
2848 return 0;
2849 }
2850
2851 ctl_table ipv6_route_table_template[] = {
2852 {
2853 .procname = "flush",
2854 .data = &init_net.ipv6.sysctl.flush_delay,
2855 .maxlen = sizeof(int),
2856 .mode = 0200,
2857 .proc_handler = ipv6_sysctl_rtcache_flush
2858 },
2859 {
2860 .procname = "gc_thresh",
2861 .data = &ip6_dst_ops_template.gc_thresh,
2862 .maxlen = sizeof(int),
2863 .mode = 0644,
2864 .proc_handler = proc_dointvec,
2865 },
2866 {
2867 .procname = "max_size",
2868 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
2869 .maxlen = sizeof(int),
2870 .mode = 0644,
2871 .proc_handler = proc_dointvec,
2872 },
2873 {
2874 .procname = "gc_min_interval",
2875 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2876 .maxlen = sizeof(int),
2877 .mode = 0644,
2878 .proc_handler = proc_dointvec_jiffies,
2879 },
2880 {
2881 .procname = "gc_timeout",
2882 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
2883 .maxlen = sizeof(int),
2884 .mode = 0644,
2885 .proc_handler = proc_dointvec_jiffies,
2886 },
2887 {
2888 .procname = "gc_interval",
2889 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
2890 .maxlen = sizeof(int),
2891 .mode = 0644,
2892 .proc_handler = proc_dointvec_jiffies,
2893 },
2894 {
2895 .procname = "gc_elasticity",
2896 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
2897 .maxlen = sizeof(int),
2898 .mode = 0644,
2899 .proc_handler = proc_dointvec,
2900 },
2901 {
2902 .procname = "mtu_expires",
2903 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
2904 .maxlen = sizeof(int),
2905 .mode = 0644,
2906 .proc_handler = proc_dointvec_jiffies,
2907 },
2908 {
2909 .procname = "min_adv_mss",
2910 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
2911 .maxlen = sizeof(int),
2912 .mode = 0644,
2913 .proc_handler = proc_dointvec,
2914 },
2915 {
2916 .procname = "gc_min_interval_ms",
2917 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2918 .maxlen = sizeof(int),
2919 .mode = 0644,
2920 .proc_handler = proc_dointvec_ms_jiffies,
2921 },
2922 { }
2923 };
2924
2925 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2926 {
2927 struct ctl_table *table;
2928
2929 table = kmemdup(ipv6_route_table_template,
2930 sizeof(ipv6_route_table_template),
2931 GFP_KERNEL);
2932
2933 if (table) {
2934 table[0].data = &net->ipv6.sysctl.flush_delay;
2935 table[0].extra1 = net;
2936 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2937 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2938 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2939 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2940 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2941 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2942 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2943 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2944 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2945
2946 /* Don't export sysctls to unprivileged users */
2947 if (net->user_ns != &init_user_ns)
2948 table[0].procname = NULL;
2949 }
2950
2951 return table;
2952 }
2953 #endif
2954
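/*
 * Per-namespace init: set up the dst ops, allocate the null entry (and,
 * with CONFIG_IPV6_MULTIPLE_TABLES, the prohibit and blackhole entries)
 * and initialise the routing sysctl defaults.
 */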
2955 static int __net_init ip6_route_net_init(struct net *net)
2956 {
2957 int ret = -ENOMEM;
2958
2959 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
2960 sizeof(net->ipv6.ip6_dst_ops));
2961
2962 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
2963 goto out_ip6_dst_ops;
2964
2965 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
2966 sizeof(*net->ipv6.ip6_null_entry),
2967 GFP_KERNEL);
2968 if (!net->ipv6.ip6_null_entry)
2969 goto out_ip6_dst_entries;
2970 net->ipv6.ip6_null_entry->dst.path =
2971 (struct dst_entry *)net->ipv6.ip6_null_entry;
2972 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2973 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
2974 ip6_template_metrics, true);
2975
2976 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2977 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
2978 sizeof(*net->ipv6.ip6_prohibit_entry),
2979 GFP_KERNEL);
2980 if (!net->ipv6.ip6_prohibit_entry)
2981 goto out_ip6_null_entry;
2982 net->ipv6.ip6_prohibit_entry->dst.path =
2983 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
2984 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2985 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
2986 ip6_template_metrics, true);
2987
2988 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
2989 sizeof(*net->ipv6.ip6_blk_hole_entry),
2990 GFP_KERNEL);
2991 if (!net->ipv6.ip6_blk_hole_entry)
2992 goto out_ip6_prohibit_entry;
2993 net->ipv6.ip6_blk_hole_entry->dst.path =
2994 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
2995 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2996 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
2997 ip6_template_metrics, true);
2998 #endif
2999
3000 net->ipv6.sysctl.flush_delay = 0;
3001 net->ipv6.sysctl.ip6_rt_max_size = 4096;
3002 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
3003 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
3004 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
3005 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
3006 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
3007 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
3008
3009 net->ipv6.ip6_rt_gc_expire = 30*HZ;
3010
3011 ret = 0;
3012 out:
3013 return ret;
3014
3015 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3016 out_ip6_prohibit_entry:
3017 kfree(net->ipv6.ip6_prohibit_entry);
3018 out_ip6_null_entry:
3019 kfree(net->ipv6.ip6_null_entry);
3020 #endif
3021 out_ip6_dst_entries:
3022 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3023 out_ip6_dst_ops:
3024 goto out;
3025 }
3026
3027 static void __net_exit ip6_route_net_exit(struct net *net)
3028 {
3029 kfree(net->ipv6.ip6_null_entry);
3030 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3031 kfree(net->ipv6.ip6_prohibit_entry);
3032 kfree(net->ipv6.ip6_blk_hole_entry);
3033 #endif
3034 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3035 }
3036
3037 static int __net_init ip6_route_net_init_late(struct net *net)
3038 {
3039 #ifdef CONFIG_PROC_FS
3040 proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
3041 proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
3042 #endif
3043 return 0;
3044 }
3045
3046 static void __net_exit ip6_route_net_exit_late(struct net *net)
3047 {
3048 #ifdef CONFIG_PROC_FS
3049 remove_proc_entry("ipv6_route", net->proc_net);
3050 remove_proc_entry("rt6_stats", net->proc_net);
3051 #endif
3052 }
3053
3054 static struct pernet_operations ip6_route_net_ops = {
3055 .init = ip6_route_net_init,
3056 .exit = ip6_route_net_exit,
3057 };
3058
3059 static int __net_init ipv6_inetpeer_init(struct net *net)
3060 {
3061 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3062
3063 if (!bp)
3064 return -ENOMEM;
3065 inet_peer_base_init(bp);
3066 net->ipv6.peers = bp;
3067 return 0;
3068 }
3069
3070 static void __net_exit ipv6_inetpeer_exit(struct net *net)
3071 {
3072 struct inet_peer_base *bp = net->ipv6.peers;
3073
3074 net->ipv6.peers = NULL;
3075 inetpeer_invalidate_tree(bp);
3076 kfree(bp);
3077 }
3078
3079 static struct pernet_operations ipv6_inetpeer_ops = {
3080 .init = ipv6_inetpeer_init,
3081 .exit = ipv6_inetpeer_exit,
3082 };
3083
3084 static struct pernet_operations ip6_route_net_late_ops = {
3085 .init = ip6_route_net_init_late,
3086 .exit = ip6_route_net_exit_late,
3087 };
3088
3089 static struct notifier_block ip6_route_dev_notifier = {
3090 .notifier_call = ip6_route_dev_notify,
3091 .priority = 0,
3092 };
3093
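/*
 * Module init: create the dst cache, register the per-namespace
 * operations, the FIB, xfrm and policy-rule infrastructure, the
 * rtnetlink handlers and the netdevice notifier, unwinding in reverse
 * order on failure.
 */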
3094 int __init ip6_route_init(void)
3095 {
3096 int ret;
3097
3098 ret = -ENOMEM;
3099 ip6_dst_ops_template.kmem_cachep =
3100 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
3101 SLAB_HWCACHE_ALIGN, NULL);
3102 if (!ip6_dst_ops_template.kmem_cachep)
3103 goto out;
3104
3105 ret = dst_entries_init(&ip6_dst_blackhole_ops);
3106 if (ret)
3107 goto out_kmem_cache;
3108
3109 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
3110 if (ret)
3111 goto out_dst_entries;
3112
3113 ret = register_pernet_subsys(&ip6_route_net_ops);
3114 if (ret)
3115 goto out_register_inetpeer;
3116
3117 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
3118
3119 /* The loopback device is registered before this code runs, so the
3120 * loopback reference in rt6_info will not have been taken; take it
3121 * manually for init_net */
3122 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
3123 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3124 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3125 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
3126 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3127 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
3128 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3129 #endif
3130 ret = fib6_init();
3131 if (ret)
3132 goto out_register_subsys;
3133
3134 ret = xfrm6_init();
3135 if (ret)
3136 goto out_fib6_init;
3137
3138 ret = fib6_rules_init();
3139 if (ret)
3140 goto xfrm6_init;
3141
3142 ret = register_pernet_subsys(&ip6_route_net_late_ops);
3143 if (ret)
3144 goto fib6_rules_init;
3145
3146 ret = -ENOBUFS;
3147 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
3148 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
3149 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
3150 goto out_register_late_subsys;
3151
3152 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
3153 if (ret)
3154 goto out_register_late_subsys;
3155
3156 out:
3157 return ret;
3158
3159 out_register_late_subsys:
3160 unregister_pernet_subsys(&ip6_route_net_late_ops);
3161 fib6_rules_init:
3162 fib6_rules_cleanup();
3163 xfrm6_init:
3164 xfrm6_fini();
3165 out_fib6_init:
3166 fib6_gc_cleanup();
3167 out_register_subsys:
3168 unregister_pernet_subsys(&ip6_route_net_ops);
3169 out_register_inetpeer:
3170 unregister_pernet_subsys(&ipv6_inetpeer_ops);
3171 out_dst_entries:
3172 dst_entries_destroy(&ip6_dst_blackhole_ops);
3173 out_kmem_cache:
3174 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3175 goto out;
3176 }
3177
3178 void ip6_route_cleanup(void)
3179 {
3180 unregister_netdevice_notifier(&ip6_route_dev_notifier);
3181 unregister_pernet_subsys(&ip6_route_net_late_ops);
3182 fib6_rules_cleanup();
3183 xfrm6_fini();
3184 fib6_gc_cleanup();
3185 unregister_pernet_subsys(&ipv6_inetpeer_ops);
3186 unregister_pernet_subsys(&ip6_route_net_ops);
3187 dst_entries_destroy(&ip6_dst_blackhole_ops);
3188 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3189 }