/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *		6WIND, Paris, France
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>

struct mr6_table {
	struct list_head	list;
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	u32			id;
	struct sock		*mroute6_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc6_unres_queue;
	struct list_head	mfc6_cache_array[MFC6_LINES];
	struct mif_device	vif6_table[MAXMIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	bool			mroute_do_assert;
	bool			mroute_do_pim;
#ifdef CONFIG_IPV6_PIMSM_V2
	int			mroute_reg_vif_num;
#endif
};

struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr6_table	*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */
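
/*
 * A minimal sketch of the read-side pattern that scheme implies
 * (illustrative only; the real call sites appear further below):
 *
 *	read_lock(&mrt_lock);
 *	c = ip6mr_cache_find(mrt, &saddr, &daddr);
 *	... use c ...
 *	read_unlock(&mrt_lock);
 *
 * The unresolved queue, by contrast, is always accessed under
 * mfc_unres_lock with BHs disabled, since the expire timer also
 * manipulates it.
 */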

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);

static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm);
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr6_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (mrt == NULL)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr6_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	ip6mr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr6_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	rtnl_unlock();
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
}
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	rtnl_lock();
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
	rtnl_unlock();
}
#endif

static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;
	unsigned int i;

	mrt = ip6mr_get_table(net, id);
	if (mrt != NULL)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (mrt == NULL)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC6_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IPV6_PIMSM_V2
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
	return mrt;
}

static void ip6mr_free_table(struct mr6_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	kfree(mrt);
}

#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	struct list_head *cache;
	int ct;
};


static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr6_table *mrt = it->mrt;
	struct mfc6_cache *mfc;

	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	read_unlock(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc6_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}

/*
 *	The /proc interfaces to multicast routing
 *	/proc/ip6_mr_cache /proc/ip6_mr_vif
 */

struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	int ct;
};

static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	struct mr6_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}

static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr6_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - mrt->vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ip6mr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
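
/*
 * For reference, /proc/net/ip6_mr_vif as produced by the seq_printf()
 * format above looks roughly like this (the values shown here are
 * illustrative, not captured output):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
 *	 0 pim6reg           0       0         0       0 00001
 *	 1 eth0          10240      80     10240      80 00000
 */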

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc6_cache, list);

	if (it->cache == &mrt->mfc6_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);

	while (++it->ct < MFC6_LINES) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc6_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc6_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc6_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr6_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc6_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc6_cache_array[it->ct])
		read_unlock(&mrt_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		struct mr6_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &mrt->mfc6_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
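
/*
 * For reference, a /proc/net/ip6_mr_cache entry rendered by the format
 * strings above looks roughly like this (addresses and counters are
 * illustrative only):
 *
 *	Group                            Origin                           Iif      Pkts  Bytes     Wrong  Oifs
 *	ff1e::1                          2001:db8::1                      1          10   1040         0   2:1
 */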
#endif

#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;
	skb->pkt_type = PACKET_HOST;

	skb_tunnel_rx(skb, reg_dev);

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
drop:
	kfree_skb(skb);
	return 0;
}

static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};
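
/*
 * pim6_protocol is registered for IPPROTO_PIM in ip6_mr_init() below,
 * so pim6_rcv() sees every inbound PIM packet; it only decapsulates
 * PIM register messages onto the pim6reg device and drops everything
 * else, leaving the rest of the PIM state machine to the userspace
 * daemon.
 */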

/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
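
/*
 * The MTU above leaves room for the outer IPv6 header (40 bytes) plus
 * 8 more bytes, which presumably correspond to the PIM register header
 * (sizeof(struct pimreghdr)) prepended when the daemon re-encapsulates;
 * 1500 is assumed as the typical Ethernet MTU.
 */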

static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, reg_vif_setup);
	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif

/*
 *	Delete a VIF entry
 */

static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
		       struct list_head *head)
{
	struct mif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding--;
		inet6_netconf_notify_devconf(dev_net(dev),
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	if ((v->flags & MIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	kmem_cache_free(mrt_cachep, c);
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}
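
/*
 * The version == 0 test above relies on a convention shared with ipmr:
 * skbs queued on behalf of a netlink RTM_GETROUTE request carry a dummy
 * IPv6 header whose version field is 0, so they can be told apart from
 * real queued data packets and answered (here with -ETIMEDOUT) instead
 * of being forwarded.
 */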

/* Timer process for all the unresolved queue. */

static void ipmr_do_expire_process(struct mr6_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}

static void ipmr_expire_process(unsigned long arg)
{
	struct mr6_table *mrt = (struct mr6_table *)arg;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (MIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
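
/*
 * Worked example: with ttls[] = { 1, 255, 3, 255, ... } and all three
 * mifs present, the loop above copies ttls for vifs 0 and 2 only and
 * leaves minvif = 0, maxvif = 3, so later forwarding loops scan exactly
 * the half-open range [minvif, maxvif) = [0, 3).
 */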

static int mif6_add(struct net *net, struct mr6_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (MIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding++;
		inet6_netconf_notify_devconf(dev_net(dev),
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			return c;
	}
	return NULL;
}

/* Look for a (*,*,oif) entry */
static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
						      mifi_t mifi)
{
	int line = MFC6_HASH(&in6addr_any, &in6addr_any);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_any(&c->mf6c_mcastgrp) &&
		    (c->mfc_un.res.ttls[mifi] < 255))
			return c;

	return NULL;
}

/* Look for a (*,G) entry */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
					       struct in6_addr *mcastgrp,
					       mifi_t mifi)
{
	int line = MFC6_HASH(mcastgrp, &in6addr_any);
	struct mfc6_cache *c, *proxy;

	if (ipv6_addr_any(mcastgrp))
		goto skip;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
			if (c->mfc_un.res.ttls[mifi] < 255)
				return c;

			/* It's ok if the mifi is part of the static tree */
			proxy = ip6mr_cache_find_any_parent(mrt,
							    c->mf6c_parent);
			if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
				return c;
		}

skip:
	return ip6mr_cache_find_any_parent(mrt, mifi);
}
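
/*
 * In the input path the callers try ip6mr_cache_find() for an exact
 * (S,G) match first; only when that fails do they fall back to the
 * (*,G) lookup above and, failing that, to the (*,*) proxy entry -
 * both wildcard stages insisting that the arrival mif has
 * ttls[mifi] < 255, i.e. is part of the configured tree.
 */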

/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
	c->mfc_un.res.minvif = MAXMIFS;
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}
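
/*
 * Note the two allocation contexts: resolved entries are created from
 * setsockopt() in process context (GFP_KERNEL above), while unresolved
 * entries are created from the packet path under mfc_unres_lock, hence
 * GFP_ATOMIC here.
 */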

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

			if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip6_mr_forward(net, mrt, skb, c);
	}
}

/*
 *	Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
		/*
		 *	Copy the IP header
		 */

		skb_put(skb, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb);
		skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

		/*
		 *	Add our header
		 */
		skb_put(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);

		msg->im6_mbz = 0;
		msg->im6_msgtype = assert;
		msg->im6_mif = mifi;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (mrt->mroute6_sk == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
	if (ret < 0) {
		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
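
/*
 * Two report flavours reach the daemon through this function:
 * MRT6MSG_NOCACHE/MRT6MSG_WRONGMIF carry just the offending IPv6 header
 * plus the struct mrt6msg, while MRT6MSG_WHOLEPKT (the PIM register
 * path) hands over the complete packet so the daemon can re-encapsulate
 * it, typically towards the rendezvous point.
 */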

/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc6_unres_queue);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);

		ipmr_do_expire_process(mrt);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
			    int parent)
{
	int line;
	struct mfc6_cache *c, *next;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == c->mf6c_parent)) {
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct mif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif6_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, 1, NULL);
		}
	}

	return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

/*
 *	Setup for IP multicast routing
 */

static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip6_mr_cache", net->proc_net);
	remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
	ip6mr_rules_exit(net);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};

int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
		      ip6mr_rtm_dumproute, NULL);
	return 0;
#ifdef CONFIG_IPV6_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}

void ip6_mr_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}

static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
			 struct mf6cctl *mfc, int mrtsock, int parent)
{
	bool found = false;
	int line;
	struct mfc6_cache *uc, *c;
	unsigned char ttls[MAXMIFS];
	int i;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == mfc->mf6cc_parent)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	list_add(&c->list, &mrt->mfc6_cache_array[line]);
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc6_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr6_table *mrt, bool all)
{
	int i;
	LIST_HEAD(list);
	struct mfc6_cache *c, *next;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
			continue;
		mif6_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < MFC6_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
			if (!all && (c->mfc_flags & MFC_STATIC))
				continue;
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
			list_del(&c->list);
			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mrt->mroute6_sk == NULL)) {
		mrt->mroute6_sk = sk;
		net->ipv6.devconf_all->mc_forwarding++;
		inet6_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
					     NETCONFA_IFINDEX_ALL,
					     net->ipv6.devconf_all);
	} else {
		err = -EADDRINUSE;
	}
	write_unlock_bh(&mrt_lock);

	rtnl_unlock();

	return err;
}

int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == mrt->mroute6_sk) {
			write_lock_bh(&mrt_lock);
			mrt->mroute6_sk = NULL;
			net->ipv6.devconf_all->mc_forwarding--;
			inet6_netconf_notify_devconf(net,
						     NETCONFA_MC_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);
			write_unlock_bh(&mrt_lock);

			mroute_clean_tables(mrt, false);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}

struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
{
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->skb_iif,
		.flowi6_oif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return NULL;

	return mrt->mroute6_sk;
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
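
/*
 * A hedged sketch of the userspace side of this interface (this mirrors
 * what pim6sd does; the variable names are illustrative, not from this
 * tree). MRT6_* options are set on an ICMPv6 raw socket at the
 * IPPROTO_IPV6 level:
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *
 *	struct mif6ctl mc = {
 *		.mif6c_mifi = 0,
 *		.mif6c_pifi = if_nametoindex("eth0"),
 *	};
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
 */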

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret, parent = 0;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, 0, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		parent = -1;
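		/* fall through - the proxy variants share the handling
		 * below; parent stays 0 for them and is taken from the
		 * mf6cctl itself.
		 */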
	case MRT6_ADD_MFC_PROXY:
	case MRT6_DEL_MFC_PROXY:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mf6cc_parent;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc,
					    sk == mrt->mroute6_sk, parent);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}

#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 100000000)
			return -EINVAL;
		if (sk == mrt->mroute6_sk)
			return -EBUSY;

		rtnl_lock();
		ret = 0;
		if (!ip6mr_new_table(net, v))
			ret = -ENOMEM;
		raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

/*
 *	Getsockopt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{
	IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
			 IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
			 IPSTATS_MIB_OUTOCTETS, skb->len);
	return dst_output(skb);
}

/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		goto out_free;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if the mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program has joined on; otherwise the program would have to
	 * join on all interfaces. On the other hand, a multihomed host (or
	 * a router, but not an mrouter) cannot join on more than one
	 * interface - that would result in receiving multiple copies of
	 * each packet.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}

static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif6_table[ct].dev == dev)
			break;
	}
	return ct;
}
2080
2081 static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
2082 struct sk_buff *skb, struct mfc6_cache *cache)
2083 {
2084 int psend = -1;
2085 int vif, ct;
2086 int true_vifi = ip6mr_find_vif(mrt, skb->dev);
2087
2088 vif = cache->mf6c_parent;
2089 cache->mfc_un.res.pkt++;
2090 cache->mfc_un.res.bytes += skb->len;
2091
2092 if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
2093 struct mfc6_cache *cache_proxy;
2094
2095 /* For an (*,G) entry, we only check that the incomming
2096 * interface is part of the static tree.
2097 */
2098 cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
2099 if (cache_proxy &&
2100 cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
2101 goto forward;
2102 }
2103
2104 /*
2105 * Wrong interface: drop packet and (maybe) send PIM assert.
2106 */
2107 if (mrt->vif6_table[vif].dev != skb->dev) {
2108 cache->mfc_un.res.wrong_if++;
2109
2110 if (true_vifi >= 0 && mrt->mroute_do_assert &&
2111 /* pimsm uses asserts, when switching from RPT to SPT,
2112 so that we cannot check that packet arrived on an oif.
2113 It is bad, but otherwise we would need to move pretty
2114 large chunk of pimd to kernel. Ough... --ANK
2115 */
2116 (mrt->mroute_do_pim ||
2117 cache->mfc_un.res.ttls[true_vifi] < 255) &&
2118 time_after(jiffies,
2119 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
2120 cache->mfc_un.res.last_assert = jiffies;
2121 ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2122 }
2123 goto dont_forward;
2124 }
2125
2126 forward:
2127 mrt->vif6_table[vif].pkt_in++;
2128 mrt->vif6_table[vif].bytes_in += skb->len;
2129
2130 /*
2131 * Forward the frame
2132 */
2133 if (ipv6_addr_any(&cache->mf6c_origin) &&
2134 ipv6_addr_any(&cache->mf6c_mcastgrp)) {
2135 if (true_vifi >= 0 &&
2136 true_vifi != cache->mf6c_parent &&
2137 ipv6_hdr(skb)->hop_limit >
2138 cache->mfc_un.res.ttls[cache->mf6c_parent]) {
2139 /* It's an (*,*) entry and the packet is not coming from
2140 * the upstream: forward the packet to the upstream
2141 * only.
2142 */
2143 psend = cache->mf6c_parent;
2144 goto last_forward;
2145 }
2146 goto dont_forward;
2147 }
2148 for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
2149 /* For (*,G) entry, don't forward to the incoming interface */
2150 if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
2151 ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
2152 if (psend != -1) {
2153 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2154 if (skb2)
2155 ip6mr_forward2(net, mrt, skb2, cache, psend);
2156 }
2157 psend = ct;
2158 }
2159 }
2160 last_forward:
2161 if (psend != -1) {
2162 ip6mr_forward2(net, mrt, skb, cache, psend);
2163 return 0;
2164 }
2165
2166 dont_forward:
2167 kfree_skb(skb);
2168 return 0;
2169 }
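/*
 * Illustrative userspace sketch, compiled out with #if 0: the
 * wrong-interface branch above only reports when the daemon enabled
 * MRT6_ASSERT, and the report is delivered in-band on the mroute
 * socket as a struct mrt6msg with im6_msgtype == MRT6MSG_WRONGMIF.
 * handle_assert() is a hypothetical daemon callback.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>
#include <unistd.h>

extern void handle_assert(int mif, const struct in6_addr *src,
			  const struct in6_addr *dst);	/* hypothetical */

static void watch_wrong_mif(int mrt_sock)
{
	int on = 1;
	char buf[8192];

	/* ask the kernel for MRT6MSG_* upcalls on wrong-mif events */
	setsockopt(mrt_sock, IPPROTO_IPV6, MRT6_ASSERT, &on, sizeof(on));

	for (;;) {
		ssize_t n = read(mrt_sock, buf, sizeof(buf));
		struct mrt6msg *msg = (struct mrt6msg *)buf;

		if (n < (ssize_t)sizeof(*msg))
			continue;
		if (msg->im6_mbz != 0)	/* ordinary ICMPv6, not an upcall */
			continue;
		if (msg->im6_msgtype == MRT6MSG_WRONGMIF)
			handle_assert(msg->im6_mif, &msg->im6_src,
				      &msg->im6_dst);
	}
}
#endif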


/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
	if (cache == NULL) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt,
						     &ipv6_hdr(skb)->daddr,
						     vif);
	}

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		vif = ip6mr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}
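/*
 * Illustrative userspace sketch, compiled out with #if 0: before
 * ip6_mr_input() can resolve anything, a routing daemon must open the
 * mroute socket, register mifs and install an (S,G) entry. The mif
 * numbers, threshold and ifindex parameter are assumptions; the
 * outgoing mif must also have been registered with MRT6_ADD_MIF.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>
#include <string.h>

static int install_sg_route(const struct in6_addr *src,
			    const struct in6_addr *grp,
			    unsigned short in_ifindex, mifi_t out_mif)
{
	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	int on = 1;
	struct mif6ctl mif;
	struct mf6cctl mfc;

	if (s < 0 || setsockopt(s, IPPROTO_IPV6, MRT6_INIT,
				&on, sizeof(on)) < 0)
		return -1;

	/* register the incoming physical interface as mif 0 */
	memset(&mif, 0, sizeof(mif));
	mif.mif6c_mifi = 0;
	mif.mif6c_pifi = in_ifindex;
	mif.vifc_threshold = 1;		/* minimum hop limit to forward */
	if (setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mif, sizeof(mif)) < 0)
		return -1;

	/* (S,G): parent mif 0, forward to out_mif */
	memset(&mfc, 0, sizeof(mfc));
	mfc.mf6cc_origin.sin6_family = AF_INET6;
	mfc.mf6cc_origin.sin6_addr = *src;
	mfc.mf6cc_mcastgrp.sin6_family = AF_INET6;
	mfc.mf6cc_mcastgrp.sin6_addr = *grp;
	mfc.mf6cc_parent = 0;
	IF_SET(out_mif, &mfc.mf6cc_ifset);
	return setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MFC, &mfc, sizeof(mfc));
}
#endif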


static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct nlattr *mp_attr;
	struct rta_mfc_stats mfcs;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mf6c_parent >= MAXMIFS)
		return -ENOENT;

	if (MIF_EXISTS(mrt, c->mf6c_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
		return -EMSGSIZE;
	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
	if (mp_attr == NULL)
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
			if (nhp == NULL) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
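/*
 * Illustrative userspace sketch, compiled out with #if 0: the
 * RTA_MULTIPATH nest built above is a packed array of struct rtnexthop
 * records, one per oif, with the TTL threshold carried in rtnh_hops.
 * print_oif() is a hypothetical helper.
 */
#if 0
#include <linux/rtnetlink.h>

extern void print_oif(int ifindex, unsigned char ttl);	/* hypothetical */

static void walk_oifs(struct rtattr *mp_attr)
{
	struct rtnexthop *nh = RTA_DATA(mp_attr);
	int len = RTA_PAYLOAD(mp_attr);

	while (RTNH_OK(nh, len)) {
		print_oif(nh->rtnh_ifindex, nh->rtnh_hops);
		len -= NLMSG_ALIGN(nh->rtnh_len);
		nh = RTNH_NEXT(nh);
	}
}
#endif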

int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
		    int nowait, u32 portid)
{
	int err;
	struct mr6_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
	if (!cache && skb->dev) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
						     vif);
	}

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		iph->saddr = rt->rt6i_src.addr;
		iph->daddr = rt->rt6i_dst.addr;

		err = ip6mr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;

	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}

static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
			     int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = RTNL_FAMILY_IP6MR;
	rtm->rtm_dst_len = 128;
	rtm->rtm_src_len = 128;
	rtm->rtm_tos = 0;
	rtm->rtm_table = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type = RTN_MULTICAST;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags = 0;

	if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) ||
	    nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp))
		goto nla_put_failure;
	err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int mr6_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size(sizeof(struct rta_mfc_stats))
		      ;

	return len;
}
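/*
 * Illustrative sketch, compiled out with #if 0: mr6_msgsize() depends
 * on the kernel identity nla_total_size(n) == NLA_ALIGN(NLA_HDRLEN + n).
 * The userspace mirror below reproduces the same worst-case estimate
 * from the uapi macros so a daemon can size its receive buffer;
 * NLA_TOTAL_SIZE() is an assumed local helper, not a uapi macro.
 */
#if 0
#include <stddef.h>
#include <netinet/in.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

#define NLA_TOTAL_SIZE(n)	NLA_ALIGN(NLA_HDRLEN + (n))

static size_t mr6_msgsize_user(int unresolved, int maxvif)
{
	size_t len = NLMSG_ALIGN(sizeof(struct rtmsg))
		+ NLA_TOTAL_SIZE(4)				/* RTA_TABLE */
		+ NLA_TOTAL_SIZE(sizeof(struct in6_addr))	/* RTA_SRC */
		+ NLA_TOTAL_SIZE(sizeof(struct in6_addr));	/* RTA_DST */

	if (!unresolved)
		len += NLA_TOTAL_SIZE(4)			/* RTA_IIF */
			+ NLA_TOTAL_SIZE(0)			/* RTA_MULTIPATH */
			+ maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
			+ NLA_TOTAL_SIZE(sizeof(struct rta_mfc_stats));
	return len;
}
#endif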

static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}
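/*
 * Illustrative userspace sketch, compiled out with #if 0: rtnl_notify()
 * above multicasts the message to RTNLGRP_IPV6_MROUTE, so a monitor
 * subscribes by setting the matching bit in nl_groups at bind time
 * (group numbering is 1-based, hence the "- 1").
 */
#if 0
#include <sys/socket.h>
#include <string.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int open_mroute_monitor(void)
{
	struct sockaddr_nl sa;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;
	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;
	sa.nl_groups = 1 << (RTNLGRP_IPV6_MROUTE - 1);
	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return -1;
	return fd;	/* RTM_NEWROUTE/RTM_DELROUTE events arrive here */
}
#endif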

static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr6_table *mrt;
	struct mfc6_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	read_lock(&mrt_lock);
	ip6mr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC6_LINES; h++) {
			list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ip6mr_fill_mroute(mrt, skb,
						      NETLINK_CB(cb->skb).portid,
						      cb->nlh->nlmsg_seq,
						      mfc, RTM_NEWROUTE,
						      NLM_F_MULTI) < 0)
					goto done;
next_entry:
				e++;
			}
			e = s_e = 0;
		}
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ip6mr_fill_mroute(mrt, skb,
					      NETLINK_CB(cb->skb).portid,
					      cb->nlh->nlmsg_seq,
					      mfc, RTM_NEWROUTE,
					      NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = s_e = 0;
		s_h = 0;
next_table:
		t++;
	}
done:
	read_unlock(&mrt_lock);

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}
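/*
 * Illustrative userspace sketch, compiled out with #if 0: the dump
 * handler above is driven by an RTM_GETROUTE request carrying
 * NLM_F_DUMP with rtm_family set to RTNL_FAMILY_IP6MR; the kernel uses
 * cb->args[] to resume the table/hash/entry walk when one skb fills up.
 * Error handling is omitted from the sketch.
 */
#if 0
#include <sys/socket.h>
#include <string.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static void request_ip6mr_dump(int nl_fd)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
	} req;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.nlh.nlmsg_seq = 1;
	req.rtm.rtm_family = RTNL_FAMILY_IP6MR;	/* multicast FIB */

	sendto(nl_fd, &req, req.nlh.nlmsg_len, 0,
	       (struct sockaddr *)&kernel, sizeof(kernel));
	/* then recv() NLM_F_MULTI parts until NLMSG_DONE */
}
#endif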