/*
 *	net/ipv6/ip6mr.c
 *
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *		6WIND, Paris, France
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>

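/* Per-netns multicast routing table: the MIF array, the hashed MFC
 * (multicast forwarding cache), the queue of not-yet-resolved entries
 * with its expiry timer, and the control socket that owns the table.
 */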
struct mr6_table {
	struct list_head	list;
	possible_net_t		net;
	u32			id;
	struct sock		*mroute6_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc6_unres_queue;
	struct list_head	mfc6_cache_array[MFC6_LINES];
	struct mif_device	vif6_table[MAXMIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	bool			mroute_do_assert;
	bool			mroute_do_pim;
#ifdef CONFIG_IPV6_PIMSM_V2
	int			mroute_reg_vif_num;
#endif
};

struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr6_table	*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);

static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm);
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);

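/* With CONFIG_IPV6_MROUTE_MULTIPLE_TABLES, tables are selected via fib
 * rules and a netns can hold several mr6_tables; without it there is
 * exactly one table per netns (net->ipv6.mrt6) and the lookup helpers
 * below collapse into trivial accessors.
 */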
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr6_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr6_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (!mrt) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	ip6mr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr6_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
	rtnl_unlock();
}
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	rtnl_lock();
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
	rtnl_unlock();
}
#endif

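/* Find the table with this id, or create it (GFP_KERNEL) on first use;
 * the existing table is returned if the id is already present.
 */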
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;
	unsigned int i;

	mrt = ip6mr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC6_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IPV6_PIMSM_V2
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
	return mrt;
}

static void ip6mr_free_table(struct mr6_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	kfree(mrt);
}

#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	struct list_head *cache;
	int ct;
};


static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr6_table *mrt = it->mrt;
	struct mfc6_cache *mfc;

	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	read_unlock(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc6_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}

/*
 *	The /proc interfaces to multicast routing
 *	/proc/ip6_mr_cache /proc/ip6_mr_vif
 */

struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	int ct;
};

static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	struct mr6_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr6_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
			   vif - mrt->vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ip6mr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc6_cache, list);

	if (it->cache == &mrt->mfc6_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);

	while (++it->ct < MFC6_LINES) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc6_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc6_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc6_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr6_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc6_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc6_cache_array[it->ct])
		read_unlock(&mrt_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		struct mr6_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &mrt->mfc6_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif

#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (!reg_dev)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
drop:
	kfree_skb(skb);
	return 0;
}

static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
#endif

/*
 *	Delete a VIF entry
 */

static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
		       struct list_head *head)
{
	struct mif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding--;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	if ((v->flags & MIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	kmem_cache_free(mrt_cachep, c);
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}


/* Timer process for all the unresolved queue. */

static void ipmr_do_expire_process(struct mr6_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}

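/* Timer callback: runs in softirq context, so it may only try-lock
 * mfc_unres_lock; if a process-context holder has it, re-arm the timer
 * one jiffy later instead of spinning.
 */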
static void ipmr_expire_process(unsigned long arg)
{
	struct mr6_table *mrt = (struct mr6_table *)arg;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (MIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}

static int mif6_add(struct net *net, struct mr6_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (MIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding++;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			return c;
	}
	return NULL;
}

/* Look for a (*,*,oif) entry */
static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
						      mifi_t mifi)
{
	int line = MFC6_HASH(&in6addr_any, &in6addr_any);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_any(&c->mf6c_mcastgrp) &&
		    (c->mfc_un.res.ttls[mifi] < 255))
			return c;

	return NULL;
}

/* Look for a (*,G) entry */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
					       struct in6_addr *mcastgrp,
					       mifi_t mifi)
{
	int line = MFC6_HASH(mcastgrp, &in6addr_any);
	struct mfc6_cache *c, *proxy;

	if (ipv6_addr_any(mcastgrp))
		goto skip;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
			if (c->mfc_un.res.ttls[mifi] < 255)
				return c;

			/* It's ok if the mifi is part of the static tree */
			proxy = ip6mr_cache_find_any_parent(mrt,
							    c->mf6c_parent);
			if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
				return c;
		}

skip:
	return ip6mr_cache_find_any_parent(mrt, mifi);
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (!c)
		return NULL;
	c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
	c->mfc_un.res.minvif = MAXMIFS;
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (!c)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));

			if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip6_mr_forward(net, mrt, skb, c);
	}
}

/*
 *	Bounce a cache query up to pim6sd and netlink.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
		/*
		 *	Copy the IP header
		 */

		skb_put(skb, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb);
		skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

		/*
		 *	Add our header
		 */
		skb_put(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);

		msg->im6_mbz = 0;
		msg->im6_msgtype = assert;
		msg->im6_mif = mifi;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (!mrt->mroute6_sk) {
		kfree_skb(skb);
		return -EINVAL;
	}

	mrt6msg_netlink_event(mrt, skb);

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
	if (ret < 0) {
		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}

/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc6_unres_queue);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);

		ipmr_do_expire_process(mrt);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
			    int parent)
{
	int line;
	struct mfc6_cache *c, *next;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == c->mf6c_parent)) {
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct mif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif6_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, 1, NULL);
		}
	}

	return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

/*
 *	Setup for IP multicast routing
 */

static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip6_mr_cache", net->proc_net);
	remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
	ip6mr_rules_exit(net);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};

int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
		      ip6mr_rtm_dumproute, 0);
	return 0;
#ifdef CONFIG_IPV6_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}

void ip6_mr_cleanup(void)
{
	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
#endif
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}

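/* Install or update an MFC entry on behalf of user space.  If a
 * matching (origin, group) entry already exists, only its parent and
 * oif thresholds are rewritten; otherwise a new entry is hashed in and
 * any packets parked on the unresolved queue for that flow are
 * replayed through ip6mr_cache_resolve().
 */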
static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
			 struct mf6cctl *mfc, int mrtsock, int parent)
{
	bool found = false;
	int line;
	struct mfc6_cache *uc, *c;
	unsigned char ttls[MAXMIFS];
	int i;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;

	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == mfc->mf6cc_parent)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	list_add(&c->list, &mrt->mfc6_cache_array[line]);
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc6_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr6_table *mrt, bool all)
{
	int i;
	LIST_HEAD(list);
	struct mfc6_cache *c, *next;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
			continue;
		mif6_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < MFC6_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
			if (!all && (c->mfc_flags & MFC_STATIC))
				continue;
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
			list_del(&c->list);
			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mrt->mroute6_sk == NULL)) {
		mrt->mroute6_sk = sk;
		net->ipv6.devconf_all->mc_forwarding++;
	} else {
		err = -EADDRINUSE;
	}
	write_unlock_bh(&mrt_lock);

	if (!err)
		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     NETCONFA_IFINDEX_ALL,
					     net->ipv6.devconf_all);
	rtnl_unlock();

	return err;
}

int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == mrt->mroute6_sk) {
			write_lock_bh(&mrt_lock);
			mrt->mroute6_sk = NULL;
			net->ipv6.devconf_all->mc_forwarding--;
			write_unlock_bh(&mrt_lock);
			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
						     NETCONFA_MC_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);

			mroute_clean_tables(mrt, false);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}

struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
{
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_oif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return NULL;

	return mrt->mroute6_sk;
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
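
/* A minimal userspace sketch (not part of this file) of how a routing
 * daemon drives this interface: the control socket must be a raw
 * ICMPv6 socket; MIFs are declared with MRT6_ADD_MIF and forwarding
 * entries with MRT6_ADD_MFC.  The interface name, addresses and MIF
 * ids below are illustrative assumptions.
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int v = 1;
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &v, sizeof(v));
 *
 *	struct mif6ctl mif = { .mif6c_mifi = 0,
 *			       .mif6c_pifi = if_nametoindex("eth0") };
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mif, sizeof(mif));
 *
 *	struct mf6cctl mfc = { .mf6cc_parent = 0 };
 *	inet_pton(AF_INET6, "2001:db8::1", &mfc.mf6cc_origin.sin6_addr);
 *	inet_pton(AF_INET6, "ff0e::1", &mfc.mf6cc_mcastgrp.sin6_addr);
 *	IF_SET(1, &mfc.mf6cc_ifset);	   forward out of MIF 1
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MFC, &mfc, sizeof(mfc));
 *
 *	setsockopt(s, IPPROTO_IPV6, MRT6_DONE, NULL, 0);
 */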

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret, parent = 0;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, 0, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		parent = -1;
	case MRT6_ADD_MFC_PROXY:
	case MRT6_DEL_MFC_PROXY:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mf6cc_parent;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc,
					    sk == mrt->mroute6_sk, parent);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}

#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 100000000)
			return -EINVAL;
		if (sk == mrt->mroute6_sk)
			return -EBUSY;

		rtnl_lock();
		ret = 0;
		if (!ip6mr_new_table(net, v))
			ret = -ENOMEM;
		raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

/*
 *	Getsock opt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t	mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTOCTETS, skb->len);
	return dst_output(net, sk, skb);
}

/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		goto out_free;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}

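/* Reverse-map a net_device to its MIF index; returns -1 when the
 * device is not configured as a MIF in this table.
 */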
2085 static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
2086 {
2087 int ct;
2088
2089 for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
2090 if (mrt->vif6_table[ct].dev == dev)
2091 break;
2092 }
2093 return ct;
2094 }
2095
static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *cache)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ip6mr_find_vif(mrt, skb->dev);

	vif = cache->mf6c_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;
	cache->mfc_un.res.lastuse = jiffies;

	if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
		struct mfc6_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif6_table[vif].dev != skb->dev) {
		cache->mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from the RPT to
		       the SPT, so we cannot check that a packet arrived
		       on an oif. That is unfortunate, but otherwise we
		       would need to move a pretty large chunk of pimd
		       into the kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif6_table[vif].pkt_in++;
	mrt->vif6_table[vif].bytes_in += skb->len;

	/*
	 * Forward the frame
	 */
	if (ipv6_addr_any(&cache->mf6c_origin) &&
	    ipv6_addr_any(&cache->mf6c_mcastgrp)) {
		if (true_vifi >= 0 &&
		    true_vifi != cache->mf6c_parent &&
		    ipv6_hdr(skb)->hop_limit >
				cache->mfc_un.res.ttls[cache->mf6c_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = cache->mf6c_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
		    ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(net, mrt, skb2, cache, psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		ip6mr_forward2(net, mrt, skb, cache, psend);
		return;
	}

dont_forward:
	kfree_skb(skb);
}

/*
 * Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.flowi6_mark = skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
	if (!cache) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt,
						     &ipv6_hdr(skb)->daddr,
						     vif);
	}

	/*
	 * No usable cache entry
	 */
	if (!cache) {
		int vif;

		vif = ip6mr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}

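/*
 * For illustration only, not part of this file: packets only reach
 * ip6_mr_input() while a daemon such as pim6sd holds the mroute6
 * socket. A minimal userspace sketch of enabling the mroute path and
 * registering one mif follows; error handling is omitted, and the
 * ifindex 2 and threshold 1 are arbitrary example values.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <linux/mroute6.h>
 *
 *	int mroute6_open(void)
 *	{
 *		int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *		int one = 1;
 *		struct mif6ctl mif = {
 *			.mif6c_mifi     = 0,	// first mif
 *			.mif6c_pifi     = 2,	// ifindex of underlying device
 *			.vifc_threshold = 1,	// TTL/hop-limit threshold
 *		};
 *
 *		// The mroute6 socket must be a raw ICMPv6 socket.
 *		setsockopt(fd, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *		setsockopt(fd, IPPROTO_IPV6, MRT6_ADD_MIF, &mif, sizeof(mif));
 *		return fd;	// closing fd tears the mroute state down
 *	}
 */
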
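/*
 * Fill the route-specific attributes (RTA_IIF, RTA_MULTIPATH and the
 * MFC statistics) for one cache entry. Returns -ENOENT for unresolved
 * entries, -EMSGSIZE when the skb runs out of room, and 1 on success.
 */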
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm)
{
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mf6c_parent >= MAXMIFS) {
		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
		return -ENOENT;
	}

	if (MIF_EXISTS(mrt, c->mf6c_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
		return -EMSGSIZE;
	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
	if (!mp_attr)
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
			if (!nhp) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD))
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}

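/*
 * RTM_GETROUTE handler for multicast destinations: look up the cache
 * entry matching the route attached to @skb and fill @rtm from it. If
 * no entry exists yet, queue a fabricated header on the unresolved
 * queue so that the daemon is asked to resolve it.
 */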
int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
		    u32 portid)
{
	int err;
	struct mr6_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
	if (!cache && skb->dev) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
						     vif);
	}

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		dev = skb->dev;
		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		iph->saddr = rt->rt6i_src.addr;
		iph->daddr = rt->rt6i_dst.addr;

		err = ip6mr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (rtm->rtm_flags & RTM_F_NOTIFY)
		cache->mfc_flags |= MFC_NOTIFY;

	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}

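/*
 * Build one complete RTM_NEWROUTE/RTM_DELROUTE message for a cache
 * entry: netlink header, rtmsg and all route attributes. An unresolved
 * entry is still dumped, just without its attributes.
 */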
static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
			     int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = RTNL_FAMILY_IP6MR;
	rtm->rtm_dst_len = 128;
	rtm->rtm_src_len = 128;
	rtm->rtm_tos = 0;
	rtm->rtm_table = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type = RTN_MULTICAST;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags = 0;

	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
		goto nla_put_failure;
	err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

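/*
 * Worst-case netlink message size for one cache entry; unresolved
 * entries carry only the table id and the (source, group) pair.
 */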
static int mr6_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
		;

	return len;
}

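/*
 * Notify RTNLGRP_IPV6_MROUTE listeners that a cache entry has been
 * added or deleted.
 */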
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}

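/*
 * Netlink message size for a cache report: the fixed-size attributes
 * plus the reported packet carried in IP6MRA_CREPORT_PKT.
 */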
static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtgenmsg))
		+ nla_total_size(1)	/* IP6MRA_CREPORT_MSGTYPE */
		+ nla_total_size(4)	/* IP6MRA_CREPORT_MIF_ID */
					/* IP6MRA_CREPORT_SRC_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_DST_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_PKT */
		+ nla_total_size(payloadlen)
		;

	return len;
}

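/*
 * Mirror an mrt6msg cache report (normally delivered only on the
 * mroute6 socket) to RTNLGRP_IPV6_MROUTE_R listeners as an
 * RTM_NEWCACHEREPORT netlink message.
 */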
static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt)
{
	struct net *net = read_pnet(&mrt->net);
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgenm;
	struct mrt6msg *msg;
	struct sk_buff *skb;
	struct nlattr *nla;
	int payloadlen;

	payloadlen = pkt->len - sizeof(struct mrt6msg);
	msg = (struct mrt6msg *)skb_transport_header(pkt);

	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
	if (!skb)
		goto errout;

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
			sizeof(struct rtgenmsg), 0);
	if (!nlh)
		goto errout;
	rtgenm = nlmsg_data(nlh);
	rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
			     &msg->im6_src) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
			     &msg->im6_dst))
		goto nla_put_failure;

	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
				  nla_data(nla), payloadlen))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
errout:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
}
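
/*
 * For illustration only, not part of this file: the report sent above
 * can be observed from userspace by joining RTNLGRP_IPV6_MROUTE_R on a
 * NETLINK_ROUTE socket. A minimal sketch (error handling omitted):
 *
 *	#include <sys/socket.h>
 *	#include <linux/netlink.h>
 *	#include <linux/rtnetlink.h>
 *
 *	int open_creport_monitor(void)
 *	{
 *		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *		int grp = RTNLGRP_IPV6_MROUTE_R;
 *
 *		// NETLINK_ADD_MEMBERSHIP takes the group number itself,
 *		// not the nl_groups bitmask.
 *		setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *			   &grp, sizeof(grp));
 *		return fd;	// recv() now yields RTM_NEWCACHEREPORT
 *	}
 */
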
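/*
 * Dump every mfc entry, resolved and unresolved, across all tables.
 * cb->args[] records (table, hash line, entry) so that an interrupted
 * dump can resume where it left off.
 */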
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr6_table *mrt;
	struct mfc6_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	read_lock(&mrt_lock);
	ip6mr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC6_LINES; h++) {
			list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ip6mr_fill_mroute(mrt, skb,
						      NETLINK_CB(cb->skb).portid,
						      cb->nlh->nlmsg_seq,
						      mfc, RTM_NEWROUTE,
						      NLM_F_MULTI) < 0)
					goto done;
next_entry:
				e++;
			}
			e = s_e = 0;
		}
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ip6mr_fill_mroute(mrt, skb,
					      NETLINK_CB(cb->skb).portid,
					      cb->nlh->nlmsg_seq,
					      mfc, RTM_NEWROUTE,
					      NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = s_e = 0;
		s_h = 0;
next_table:
		t++;
	}
done:
	read_unlock(&mrt_lock);

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}
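
/*
 * For illustration only, not part of this file: this dump is what
 * services "ip -6 mroute show". A minimal sketch of the request side
 * (error handling and reply parsing omitted; fd is assumed to be an
 * already-bound NETLINK_ROUTE socket):
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/netlink.h>
 *	#include <linux/rtnetlink.h>
 *
 *	void request_ip6mr_dump(int fd)
 *	{
 *		struct {
 *			struct nlmsghdr nlh;
 *			struct rtmsg rtm;
 *		} req;
 *
 *		memset(&req, 0, sizeof(req));
 *		req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
 *		req.nlh.nlmsg_type = RTM_GETROUTE;
 *		req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
 *		req.rtm.rtm_family = RTNL_FAMILY_IP6MR; // dispatches here
 *		send(fd, &req, req.nlh.nlmsg_len, 0);
 *	}
 */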