net: avoid reference counter overflows on fib_rules in multicast forwarding
net/ipv6/ip6mr.c
1 /*
2 * Linux IPv6 multicast routing support for BSD pim6sd
3 * Based on net/ipv4/ipmr.c.
4 *
5 * (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
6 * LSIIT Laboratory, Strasbourg, France
7 * (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
8 * 6WIND, Paris, France
9 * Copyright (C)2007,2008 USAGI/WIDE Project
10 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 */
18
19 #include <asm/uaccess.h>
20 #include <linux/types.h>
21 #include <linux/sched.h>
22 #include <linux/errno.h>
23 #include <linux/timer.h>
24 #include <linux/mm.h>
25 #include <linux/kernel.h>
26 #include <linux/fcntl.h>
27 #include <linux/stat.h>
28 #include <linux/socket.h>
29 #include <linux/inet.h>
30 #include <linux/netdevice.h>
31 #include <linux/inetdevice.h>
32 #include <linux/proc_fs.h>
33 #include <linux/seq_file.h>
34 #include <linux/init.h>
35 #include <linux/slab.h>
36 #include <linux/compat.h>
37 #include <net/protocol.h>
38 #include <linux/skbuff.h>
39 #include <net/sock.h>
40 #include <net/raw.h>
41 #include <linux/notifier.h>
42 #include <linux/if_arp.h>
43 #include <net/checksum.h>
44 #include <net/netlink.h>
45 #include <net/fib_rules.h>
46
47 #include <net/ipv6.h>
48 #include <net/ip6_route.h>
49 #include <linux/mroute6.h>
50 #include <linux/pim.h>
51 #include <net/addrconf.h>
52 #include <linux/netfilter_ipv6.h>
53 #include <linux/export.h>
54 #include <net/ip6_checksum.h>
55 #include <linux/netconf.h>
56
57 struct mr6_table {
58 struct list_head list;
59 #ifdef CONFIG_NET_NS
60 struct net *net;
61 #endif
62 u32 id;
63 struct sock *mroute6_sk;
64 struct timer_list ipmr_expire_timer;
65 struct list_head mfc6_unres_queue;
66 struct list_head mfc6_cache_array[MFC6_LINES];
67 struct mif_device vif6_table[MAXMIFS];
68 int maxvif;
69 atomic_t cache_resolve_queue_len;
70 bool mroute_do_assert;
71 bool mroute_do_pim;
72 #ifdef CONFIG_IPV6_PIMSM_V2
73 int mroute_reg_vif_num;
74 #endif
75 };
76
77 struct ip6mr_rule {
78 struct fib_rule common;
79 };
80
81 struct ip6mr_result {
82 struct mr6_table *mrt;
83 };
84
85 /* Big lock, protecting vif table, mrt cache and mroute socket state.
86 Note that changes are serialized via rtnl_lock.
87 */
88
89 static DEFINE_RWLOCK(mrt_lock);
90
91 /*
92 * Multicast router control variables
93 */
94
95 #define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
96
97 /* Special spinlock for queue of unresolved entries */
98 static DEFINE_SPINLOCK(mfc_unres_lock);
99
100 /* We return to Alan's original scheme. The hash table of resolved
101 entries is changed only in process context and protected
102 with the weak lock mrt_lock. The queue of unresolved entries is
103 protected with the strong spinlock mfc_unres_lock.
104
105 In this case the data path is entirely free of exclusive locks.
106 */
107
108 static struct kmem_cache *mrt_cachep __read_mostly;
109
110 static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
111 static void ip6mr_free_table(struct mr6_table *mrt);
112
113 static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
114 struct sk_buff *skb, struct mfc6_cache *cache);
115 static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
116 mifi_t mifi, int assert);
117 static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
118 struct mfc6_cache *c, struct rtmsg *rtm);
119 static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
120 int cmd);
121 static int ip6mr_rtm_dumproute(struct sk_buff *skb,
122 struct netlink_callback *cb);
123 static void mroute_clean_tables(struct mr6_table *mrt);
124 static void ipmr_expire_process(unsigned long arg);
125
126 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
127 #define ip6mr_for_each_table(mrt, net) \
128 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
129
130 static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
131 {
132 struct mr6_table *mrt;
133
134 ip6mr_for_each_table(mrt, net) {
135 if (mrt->id == id)
136 return mrt;
137 }
138 return NULL;
139 }
140
141 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
142 struct mr6_table **mrt)
143 {
144 int err;
145 struct ip6mr_result res;
146 struct fib_lookup_arg arg = {
147 .result = &res,
148 .flags = FIB_LOOKUP_NOREF,
149 };
150
151 err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
152 flowi6_to_flowi(flp6), 0, &arg);
153 if (err < 0)
154 return err;
155 *mrt = res.mrt;
156 return 0;
157 }
158
159 static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
160 int flags, struct fib_lookup_arg *arg)
161 {
162 struct ip6mr_result *res = arg->result;
163 struct mr6_table *mrt;
164
165 switch (rule->action) {
166 case FR_ACT_TO_TBL:
167 break;
168 case FR_ACT_UNREACHABLE:
169 return -ENETUNREACH;
170 case FR_ACT_PROHIBIT:
171 return -EACCES;
172 case FR_ACT_BLACKHOLE:
173 default:
174 return -EINVAL;
175 }
176
177 mrt = ip6mr_get_table(rule->fr_net, rule->table);
178 if (mrt == NULL)
179 return -EAGAIN;
180 res->mrt = mrt;
181 return 0;
182 }
183
184 static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
185 {
186 return 1;
187 }
188
189 static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
190 FRA_GENERIC_POLICY,
191 };
192
193 static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
194 struct fib_rule_hdr *frh, struct nlattr **tb)
195 {
196 return 0;
197 }
198
199 static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
200 struct nlattr **tb)
201 {
202 return 1;
203 }
204
205 static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
206 struct fib_rule_hdr *frh)
207 {
208 frh->dst_len = 0;
209 frh->src_len = 0;
210 frh->tos = 0;
211 return 0;
212 }
213
214 static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
215 .family = RTNL_FAMILY_IP6MR,
216 .rule_size = sizeof(struct ip6mr_rule),
217 .addr_size = sizeof(struct in6_addr),
218 .action = ip6mr_rule_action,
219 .match = ip6mr_rule_match,
220 .configure = ip6mr_rule_configure,
221 .compare = ip6mr_rule_compare,
222 .default_pref = fib_default_rule_pref,
223 .fill = ip6mr_rule_fill,
224 .nlgroup = RTNLGRP_IPV6_RULE,
225 .policy = ip6mr_rule_policy,
226 .owner = THIS_MODULE,
227 };
228
229 static int __net_init ip6mr_rules_init(struct net *net)
230 {
231 struct fib_rules_ops *ops;
232 struct mr6_table *mrt;
233 int err;
234
235 ops = fib_rules_register(&ip6mr_rules_ops_template, net);
236 if (IS_ERR(ops))
237 return PTR_ERR(ops);
238
239 INIT_LIST_HEAD(&net->ipv6.mr6_tables);
240
241 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
242 if (mrt == NULL) {
243 err = -ENOMEM;
244 goto err1;
245 }
246
247 err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
248 if (err < 0)
249 goto err2;
250
251 net->ipv6.mr6_rules_ops = ops;
252 return 0;
253
254 err2:
255 kfree(mrt);
256 err1:
257 fib_rules_unregister(ops);
258 return err;
259 }
260
261 static void __net_exit ip6mr_rules_exit(struct net *net)
262 {
263 struct mr6_table *mrt, *next;
264
265 rtnl_lock();
266 list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
267 list_del(&mrt->list);
268 ip6mr_free_table(mrt);
269 }
270 rtnl_unlock();
271 fib_rules_unregister(net->ipv6.mr6_rules_ops);
272 }
273 #else
274 #define ip6mr_for_each_table(mrt, net) \
275 for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
276
277 static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
278 {
279 return net->ipv6.mrt6;
280 }
281
282 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
283 struct mr6_table **mrt)
284 {
285 *mrt = net->ipv6.mrt6;
286 return 0;
287 }
288
289 static int __net_init ip6mr_rules_init(struct net *net)
290 {
291 net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
292 return net->ipv6.mrt6 ? 0 : -ENOMEM;
293 }
294
295 static void __net_exit ip6mr_rules_exit(struct net *net)
296 {
297 rtnl_lock();
298 ip6mr_free_table(net->ipv6.mrt6);
299 net->ipv6.mrt6 = NULL;
300 rtnl_unlock();
301 }
302 #endif
303
304 static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
305 {
306 struct mr6_table *mrt;
307 unsigned int i;
308
309 mrt = ip6mr_get_table(net, id);
310 if (mrt != NULL)
311 return mrt;
312
313 mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
314 if (mrt == NULL)
315 return NULL;
316 mrt->id = id;
317 write_pnet(&mrt->net, net);
318
319 /* Forwarding cache */
320 for (i = 0; i < MFC6_LINES; i++)
321 INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);
322
323 INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
324
325 setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
326 (unsigned long)mrt);
327
328 #ifdef CONFIG_IPV6_PIMSM_V2
329 mrt->mroute_reg_vif_num = -1;
330 #endif
331 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
332 list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
333 #endif
334 return mrt;
335 }
336
337 static void ip6mr_free_table(struct mr6_table *mrt)
338 {
339 del_timer(&mrt->ipmr_expire_timer);
340 mroute_clean_tables(mrt);
341 kfree(mrt);
342 }
343
344 #ifdef CONFIG_PROC_FS
345
346 struct ipmr_mfc_iter {
347 struct seq_net_private p;
348 struct mr6_table *mrt;
349 struct list_head *cache;
350 int ct;
351 };
352
353
354 static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
355 struct ipmr_mfc_iter *it, loff_t pos)
356 {
357 struct mr6_table *mrt = it->mrt;
358 struct mfc6_cache *mfc;
359
360 read_lock(&mrt_lock);
361 for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
362 it->cache = &mrt->mfc6_cache_array[it->ct];
363 list_for_each_entry(mfc, it->cache, list)
364 if (pos-- == 0)
365 return mfc;
366 }
367 read_unlock(&mrt_lock);
368
369 spin_lock_bh(&mfc_unres_lock);
370 it->cache = &mrt->mfc6_unres_queue;
371 list_for_each_entry(mfc, it->cache, list)
372 if (pos-- == 0)
373 return mfc;
374 spin_unlock_bh(&mfc_unres_lock);
375
376 it->cache = NULL;
377 return NULL;
378 }
379
380 /*
381 * The /proc interfaces to multicast routing: /proc/net/ip6_mr_cache and /proc/net/ip6_mr_vif
382 */
383
384 struct ipmr_vif_iter {
385 struct seq_net_private p;
386 struct mr6_table *mrt;
387 int ct;
388 };
389
390 static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
391 struct ipmr_vif_iter *iter,
392 loff_t pos)
393 {
394 struct mr6_table *mrt = iter->mrt;
395
396 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
397 if (!MIF_EXISTS(mrt, iter->ct))
398 continue;
399 if (pos-- == 0)
400 return &mrt->vif6_table[iter->ct];
401 }
402 return NULL;
403 }
404
405 static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
406 __acquires(mrt_lock)
407 {
408 struct ipmr_vif_iter *iter = seq->private;
409 struct net *net = seq_file_net(seq);
410 struct mr6_table *mrt;
411
412 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
413 if (mrt == NULL)
414 return ERR_PTR(-ENOENT);
415
416 iter->mrt = mrt;
417
418 read_lock(&mrt_lock);
419 return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
420 : SEQ_START_TOKEN;
421 }
422
423 static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
424 {
425 struct ipmr_vif_iter *iter = seq->private;
426 struct net *net = seq_file_net(seq);
427 struct mr6_table *mrt = iter->mrt;
428
429 ++*pos;
430 if (v == SEQ_START_TOKEN)
431 return ip6mr_vif_seq_idx(net, iter, 0);
432
433 while (++iter->ct < mrt->maxvif) {
434 if (!MIF_EXISTS(mrt, iter->ct))
435 continue;
436 return &mrt->vif6_table[iter->ct];
437 }
438 return NULL;
439 }
440
441 static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
442 __releases(mrt_lock)
443 {
444 read_unlock(&mrt_lock);
445 }
446
447 static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
448 {
449 struct ipmr_vif_iter *iter = seq->private;
450 struct mr6_table *mrt = iter->mrt;
451
452 if (v == SEQ_START_TOKEN) {
453 seq_puts(seq,
454 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
455 } else {
456 const struct mif_device *vif = v;
457 const char *name = vif->dev ? vif->dev->name : "none";
458
459 seq_printf(seq,
460 "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
461 vif - mrt->vif6_table,
462 name, vif->bytes_in, vif->pkt_in,
463 vif->bytes_out, vif->pkt_out,
464 vif->flags);
465 }
466 return 0;
467 }
468
469 static const struct seq_operations ip6mr_vif_seq_ops = {
470 .start = ip6mr_vif_seq_start,
471 .next = ip6mr_vif_seq_next,
472 .stop = ip6mr_vif_seq_stop,
473 .show = ip6mr_vif_seq_show,
474 };
475
476 static int ip6mr_vif_open(struct inode *inode, struct file *file)
477 {
478 return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
479 sizeof(struct ipmr_vif_iter));
480 }
481
482 static const struct file_operations ip6mr_vif_fops = {
483 .owner = THIS_MODULE,
484 .open = ip6mr_vif_open,
485 .read = seq_read,
486 .llseek = seq_lseek,
487 .release = seq_release_net,
488 };
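/*
 * Editorial note (not in the original source): with the seq_printf
 * format above, /proc/net/ip6_mr_vif renders roughly as follows; the
 * interface names and counter values here are purely illustrative:
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
 *	 0 eth0           9540      90      9540      90 00000
 *	 1 pim6reg           0       0         0       0 00001
 *
 * The Flags column is the mif's flag word in hex, e.g. 00001 for a
 * MIFF_REGISTER vif.
 */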
489
490 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
491 {
492 struct ipmr_mfc_iter *it = seq->private;
493 struct net *net = seq_file_net(seq);
494 struct mr6_table *mrt;
495
496 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
497 if (mrt == NULL)
498 return ERR_PTR(-ENOENT);
499
500 it->mrt = mrt;
501 return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
502 : SEQ_START_TOKEN;
503 }
504
505 static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
506 {
507 struct mfc6_cache *mfc = v;
508 struct ipmr_mfc_iter *it = seq->private;
509 struct net *net = seq_file_net(seq);
510 struct mr6_table *mrt = it->mrt;
511
512 ++*pos;
513
514 if (v == SEQ_START_TOKEN)
515 return ipmr_mfc_seq_idx(net, seq->private, 0);
516
517 if (mfc->list.next != it->cache)
518 return list_entry(mfc->list.next, struct mfc6_cache, list);
519
520 if (it->cache == &mrt->mfc6_unres_queue)
521 goto end_of_list;
522
523 BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);
524
525 while (++it->ct < MFC6_LINES) {
526 it->cache = &mrt->mfc6_cache_array[it->ct];
527 if (list_empty(it->cache))
528 continue;
529 return list_first_entry(it->cache, struct mfc6_cache, list);
530 }
531
532 /* exhausted cache_array, show unresolved */
533 read_unlock(&mrt_lock);
534 it->cache = &mrt->mfc6_unres_queue;
535 it->ct = 0;
536
537 spin_lock_bh(&mfc_unres_lock);
538 if (!list_empty(it->cache))
539 return list_first_entry(it->cache, struct mfc6_cache, list);
540
541 end_of_list:
542 spin_unlock_bh(&mfc_unres_lock);
543 it->cache = NULL;
544
545 return NULL;
546 }
547
548 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
549 {
550 struct ipmr_mfc_iter *it = seq->private;
551 struct mr6_table *mrt = it->mrt;
552
553 if (it->cache == &mrt->mfc6_unres_queue)
554 spin_unlock_bh(&mfc_unres_lock);
555 	else if (it->cache == &mrt->mfc6_cache_array[it->ct])
556 read_unlock(&mrt_lock);
557 }
558
559 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
560 {
561 int n;
562
563 if (v == SEQ_START_TOKEN) {
564 seq_puts(seq,
565 "Group "
566 "Origin "
567 "Iif Pkts Bytes Wrong Oifs\n");
568 } else {
569 const struct mfc6_cache *mfc = v;
570 const struct ipmr_mfc_iter *it = seq->private;
571 struct mr6_table *mrt = it->mrt;
572
573 seq_printf(seq, "%pI6 %pI6 %-3hd",
574 &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
575 mfc->mf6c_parent);
576
577 if (it->cache != &mrt->mfc6_unres_queue) {
578 seq_printf(seq, " %8lu %8lu %8lu",
579 mfc->mfc_un.res.pkt,
580 mfc->mfc_un.res.bytes,
581 mfc->mfc_un.res.wrong_if);
582 for (n = mfc->mfc_un.res.minvif;
583 n < mfc->mfc_un.res.maxvif; n++) {
584 if (MIF_EXISTS(mrt, n) &&
585 mfc->mfc_un.res.ttls[n] < 255)
586 seq_printf(seq,
587 " %2d:%-3d",
588 n, mfc->mfc_un.res.ttls[n]);
589 }
590 } else {
591 /* unresolved mfc_caches don't contain
592 * pkt, bytes and wrong_if values
593 */
594 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
595 }
596 seq_putc(seq, '\n');
597 }
598 return 0;
599 }
600
601 static const struct seq_operations ipmr_mfc_seq_ops = {
602 .start = ipmr_mfc_seq_start,
603 .next = ipmr_mfc_seq_next,
604 .stop = ipmr_mfc_seq_stop,
605 .show = ipmr_mfc_seq_show,
606 };
607
608 static int ipmr_mfc_open(struct inode *inode, struct file *file)
609 {
610 return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
611 sizeof(struct ipmr_mfc_iter));
612 }
613
614 static const struct file_operations ip6mr_mfc_fops = {
615 .owner = THIS_MODULE,
616 .open = ipmr_mfc_open,
617 .read = seq_read,
618 .llseek = seq_lseek,
619 .release = seq_release_net,
620 };
621 #endif
622
623 #ifdef CONFIG_IPV6_PIMSM_V2
624
625 static int pim6_rcv(struct sk_buff *skb)
626 {
627 struct pimreghdr *pim;
628 struct ipv6hdr *encap;
629 struct net_device *reg_dev = NULL;
630 struct net *net = dev_net(skb->dev);
631 struct mr6_table *mrt;
632 struct flowi6 fl6 = {
633 .flowi6_iif = skb->dev->ifindex,
634 .flowi6_mark = skb->mark,
635 };
636 int reg_vif_num;
637
638 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
639 goto drop;
640
641 pim = (struct pimreghdr *)skb_transport_header(skb);
642 if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
643 (pim->flags & PIM_NULL_REGISTER) ||
644 (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
645 sizeof(*pim), IPPROTO_PIM,
646 csum_partial((void *)pim, sizeof(*pim), 0)) &&
647 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
648 goto drop;
649
650 	/* check if the inner packet is destined to a multicast group */
651 encap = (struct ipv6hdr *)(skb_transport_header(skb) +
652 sizeof(*pim));
653
654 if (!ipv6_addr_is_multicast(&encap->daddr) ||
655 encap->payload_len == 0 ||
656 ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
657 goto drop;
658
659 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
660 goto drop;
661 reg_vif_num = mrt->mroute_reg_vif_num;
662
663 read_lock(&mrt_lock);
664 if (reg_vif_num >= 0)
665 reg_dev = mrt->vif6_table[reg_vif_num].dev;
666 if (reg_dev)
667 dev_hold(reg_dev);
668 read_unlock(&mrt_lock);
669
670 if (reg_dev == NULL)
671 goto drop;
672
673 skb->mac_header = skb->network_header;
674 skb_pull(skb, (u8 *)encap - skb->data);
675 skb_reset_network_header(skb);
676 skb->protocol = htons(ETH_P_IPV6);
677 skb->ip_summed = CHECKSUM_NONE;
678 skb->pkt_type = PACKET_HOST;
679
680 skb_tunnel_rx(skb, reg_dev);
681
682 netif_rx(skb);
683
684 dev_put(reg_dev);
685 return 0;
686 drop:
687 kfree_skb(skb);
688 return 0;
689 }
690
691 static const struct inet6_protocol pim6_protocol = {
692 .handler = pim6_rcv,
693 };
694
695 /* Service routines creating virtual interfaces: PIMREG */
696
697 static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
698 struct net_device *dev)
699 {
700 struct net *net = dev_net(dev);
701 struct mr6_table *mrt;
702 struct flowi6 fl6 = {
703 .flowi6_oif = dev->ifindex,
704 .flowi6_iif = skb->skb_iif,
705 .flowi6_mark = skb->mark,
706 };
707 int err;
708
709 err = ip6mr_fib_lookup(net, &fl6, &mrt);
710 if (err < 0) {
711 kfree_skb(skb);
712 return err;
713 }
714
715 read_lock(&mrt_lock);
716 dev->stats.tx_bytes += skb->len;
717 dev->stats.tx_packets++;
718 ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
719 read_unlock(&mrt_lock);
720 kfree_skb(skb);
721 return NETDEV_TX_OK;
722 }
723
724 static const struct net_device_ops reg_vif_netdev_ops = {
725 .ndo_start_xmit = reg_vif_xmit,
726 };
727
728 static void reg_vif_setup(struct net_device *dev)
729 {
730 dev->type = ARPHRD_PIMREG;
731 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
732 dev->flags = IFF_NOARP;
733 dev->netdev_ops = &reg_vif_netdev_ops;
734 dev->destructor = free_netdev;
735 dev->features |= NETIF_F_NETNS_LOCAL;
736 }
737
738 static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
739 {
740 struct net_device *dev;
741 char name[IFNAMSIZ];
742
743 if (mrt->id == RT6_TABLE_DFLT)
744 sprintf(name, "pim6reg");
745 else
746 sprintf(name, "pim6reg%u", mrt->id);
747
748 dev = alloc_netdev(0, name, reg_vif_setup);
749 if (dev == NULL)
750 return NULL;
751
752 dev_net_set(dev, net);
753
754 if (register_netdevice(dev)) {
755 free_netdev(dev);
756 return NULL;
757 }
758 dev->iflink = 0;
759
760 if (dev_open(dev))
761 goto failure;
762
763 dev_hold(dev);
764 return dev;
765
766 failure:
767 	/* allow the registration to complete before unregistering. */
768 rtnl_unlock();
769 rtnl_lock();
770
771 unregister_netdevice(dev);
772 return NULL;
773 }
774 #endif
775
776 /*
777 * Delete a VIF entry
778 */
779
780 static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
781 {
782 struct mif_device *v;
783 struct net_device *dev;
784 struct inet6_dev *in6_dev;
785
786 if (vifi < 0 || vifi >= mrt->maxvif)
787 return -EADDRNOTAVAIL;
788
789 v = &mrt->vif6_table[vifi];
790
791 write_lock_bh(&mrt_lock);
792 dev = v->dev;
793 v->dev = NULL;
794
795 if (!dev) {
796 write_unlock_bh(&mrt_lock);
797 return -EADDRNOTAVAIL;
798 }
799
800 #ifdef CONFIG_IPV6_PIMSM_V2
801 if (vifi == mrt->mroute_reg_vif_num)
802 mrt->mroute_reg_vif_num = -1;
803 #endif
804
805 if (vifi + 1 == mrt->maxvif) {
806 int tmp;
807 for (tmp = vifi - 1; tmp >= 0; tmp--) {
808 if (MIF_EXISTS(mrt, tmp))
809 break;
810 }
811 mrt->maxvif = tmp + 1;
812 }
813
814 write_unlock_bh(&mrt_lock);
815
816 dev_set_allmulti(dev, -1);
817
818 in6_dev = __in6_dev_get(dev);
819 if (in6_dev) {
820 in6_dev->cnf.mc_forwarding--;
821 inet6_netconf_notify_devconf(dev_net(dev),
822 NETCONFA_MC_FORWARDING,
823 dev->ifindex, &in6_dev->cnf);
824 }
825
826 if (v->flags & MIFF_REGISTER)
827 unregister_netdevice_queue(dev, head);
828
829 dev_put(dev);
830 return 0;
831 }
832
833 static inline void ip6mr_cache_free(struct mfc6_cache *c)
834 {
835 kmem_cache_free(mrt_cachep, c);
836 }
837
838 /* Destroy an unresolved cache entry, killing queued skbs
839 and reporting error to netlink readers.
840 */
841
842 static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
843 {
844 struct net *net = read_pnet(&mrt->net);
845 struct sk_buff *skb;
846
847 atomic_dec(&mrt->cache_resolve_queue_len);
848
849 	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
850 if (ipv6_hdr(skb)->version == 0) {
851 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
852 nlh->nlmsg_type = NLMSG_ERROR;
853 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
854 skb_trim(skb, nlh->nlmsg_len);
855 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
856 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
857 } else
858 kfree_skb(skb);
859 }
860
861 ip6mr_cache_free(c);
862 }
863
864
865 /* Timer process for all the unresolved queue. */
866
867 static void ipmr_do_expire_process(struct mr6_table *mrt)
868 {
869 unsigned long now = jiffies;
870 unsigned long expires = 10 * HZ;
871 struct mfc6_cache *c, *next;
872
873 list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
874 if (time_after(c->mfc_un.unres.expires, now)) {
875 /* not yet... */
876 unsigned long interval = c->mfc_un.unres.expires - now;
877 if (interval < expires)
878 expires = interval;
879 continue;
880 }
881
882 list_del(&c->list);
883 mr6_netlink_event(mrt, c, RTM_DELROUTE);
884 ip6mr_destroy_unres(mrt, c);
885 }
886
887 if (!list_empty(&mrt->mfc6_unres_queue))
888 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
889 }
890
891 static void ipmr_expire_process(unsigned long arg)
892 {
893 struct mr6_table *mrt = (struct mr6_table *)arg;
894
895 if (!spin_trylock(&mfc_unres_lock)) {
896 mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
897 return;
898 }
899
900 if (!list_empty(&mrt->mfc6_unres_queue))
901 ipmr_do_expire_process(mrt);
902
903 spin_unlock(&mfc_unres_lock);
904 }
905
906 /* Fill oifs list. It is called under write locked mrt_lock. */
907
908 static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
909 unsigned char *ttls)
910 {
911 int vifi;
912
913 cache->mfc_un.res.minvif = MAXMIFS;
914 cache->mfc_un.res.maxvif = 0;
915 memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
916
917 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
918 if (MIF_EXISTS(mrt, vifi) &&
919 ttls[vifi] && ttls[vifi] < 255) {
920 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
921 if (cache->mfc_un.res.minvif > vifi)
922 cache->mfc_un.res.minvif = vifi;
923 if (cache->mfc_un.res.maxvif <= vifi)
924 cache->mfc_un.res.maxvif = vifi + 1;
925 }
926 }
927 }
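/*
 * Editorial worked example (not in the original source): with
 * MAXMIFS-sized ttls = { 255, 1, 255, 64, 255, ... } and mifs 1 and 3
 * present, only those two entries are copied (a ttl of 0 or 255 means
 * "never forward on this mif"), so the loop above leaves minvif = 1
 * and maxvif = 4, i.e. the half-open scan range [minvif, maxvif)
 * later walked by ip6_mr_forward().
 */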
928
929 static int mif6_add(struct net *net, struct mr6_table *mrt,
930 struct mif6ctl *vifc, int mrtsock)
931 {
932 int vifi = vifc->mif6c_mifi;
933 struct mif_device *v = &mrt->vif6_table[vifi];
934 struct net_device *dev;
935 struct inet6_dev *in6_dev;
936 int err;
937
938 	/* Is vif busy? */
939 if (MIF_EXISTS(mrt, vifi))
940 return -EADDRINUSE;
941
942 switch (vifc->mif6c_flags) {
943 #ifdef CONFIG_IPV6_PIMSM_V2
944 case MIFF_REGISTER:
945 /*
946 * Special Purpose VIF in PIM
947 * All the packets will be sent to the daemon
948 */
949 if (mrt->mroute_reg_vif_num >= 0)
950 return -EADDRINUSE;
951 dev = ip6mr_reg_vif(net, mrt);
952 if (!dev)
953 return -ENOBUFS;
954 err = dev_set_allmulti(dev, 1);
955 if (err) {
956 unregister_netdevice(dev);
957 dev_put(dev);
958 return err;
959 }
960 break;
961 #endif
962 case 0:
963 dev = dev_get_by_index(net, vifc->mif6c_pifi);
964 if (!dev)
965 return -EADDRNOTAVAIL;
966 err = dev_set_allmulti(dev, 1);
967 if (err) {
968 dev_put(dev);
969 return err;
970 }
971 break;
972 default:
973 return -EINVAL;
974 }
975
976 in6_dev = __in6_dev_get(dev);
977 if (in6_dev) {
978 in6_dev->cnf.mc_forwarding++;
979 inet6_netconf_notify_devconf(dev_net(dev),
980 NETCONFA_MC_FORWARDING,
981 dev->ifindex, &in6_dev->cnf);
982 }
983
984 /*
985 * Fill in the VIF structures
986 */
987 v->rate_limit = vifc->vifc_rate_limit;
988 v->flags = vifc->mif6c_flags;
989 if (!mrtsock)
990 v->flags |= VIFF_STATIC;
991 v->threshold = vifc->vifc_threshold;
992 v->bytes_in = 0;
993 v->bytes_out = 0;
994 v->pkt_in = 0;
995 v->pkt_out = 0;
996 v->link = dev->ifindex;
997 if (v->flags & MIFF_REGISTER)
998 v->link = dev->iflink;
999
1000 /* And finish update writing critical data */
1001 write_lock_bh(&mrt_lock);
1002 v->dev = dev;
1003 #ifdef CONFIG_IPV6_PIMSM_V2
1004 if (v->flags & MIFF_REGISTER)
1005 mrt->mroute_reg_vif_num = vifi;
1006 #endif
1007 if (vifi + 1 > mrt->maxvif)
1008 mrt->maxvif = vifi + 1;
1009 write_unlock_bh(&mrt_lock);
1010 return 0;
1011 }
1012
1013 static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
1014 const struct in6_addr *origin,
1015 const struct in6_addr *mcastgrp)
1016 {
1017 int line = MFC6_HASH(mcastgrp, origin);
1018 struct mfc6_cache *c;
1019
1020 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1021 if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
1022 ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
1023 return c;
1024 }
1025 return NULL;
1026 }
1027
1028 /* Look for a (*,*,oif) entry */
1029 static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
1030 mifi_t mifi)
1031 {
1032 int line = MFC6_HASH(&in6addr_any, &in6addr_any);
1033 struct mfc6_cache *c;
1034
1035 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
1036 if (ipv6_addr_any(&c->mf6c_origin) &&
1037 ipv6_addr_any(&c->mf6c_mcastgrp) &&
1038 (c->mfc_un.res.ttls[mifi] < 255))
1039 return c;
1040
1041 return NULL;
1042 }
1043
1044 /* Look for a (*,G) entry */
1045 static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
1046 struct in6_addr *mcastgrp,
1047 mifi_t mifi)
1048 {
1049 int line = MFC6_HASH(mcastgrp, &in6addr_any);
1050 struct mfc6_cache *c, *proxy;
1051
1052 if (ipv6_addr_any(mcastgrp))
1053 goto skip;
1054
1055 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
1056 if (ipv6_addr_any(&c->mf6c_origin) &&
1057 ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
1058 if (c->mfc_un.res.ttls[mifi] < 255)
1059 return c;
1060
1061 /* It's ok if the mifi is part of the static tree */
1062 proxy = ip6mr_cache_find_any_parent(mrt,
1063 c->mf6c_parent);
1064 if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
1065 return c;
1066 }
1067
1068 skip:
1069 return ip6mr_cache_find_any_parent(mrt, mifi);
1070 }
1071
1072 /*
1073 * Allocate a multicast cache entry
1074 */
1075 static struct mfc6_cache *ip6mr_cache_alloc(void)
1076 {
1077 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
1078 if (c == NULL)
1079 return NULL;
1080 c->mfc_un.res.minvif = MAXMIFS;
1081 return c;
1082 }
1083
1084 static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
1085 {
1086 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
1087 if (c == NULL)
1088 return NULL;
1089 skb_queue_head_init(&c->mfc_un.unres.unresolved);
1090 c->mfc_un.unres.expires = jiffies + 10 * HZ;
1091 return c;
1092 }
1093
1094 /*
1095 * A cache entry has gone into a resolved state from queued
1096 */
1097
1098 static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
1099 struct mfc6_cache *uc, struct mfc6_cache *c)
1100 {
1101 struct sk_buff *skb;
1102
1103 /*
1104 * Play the pending entries through our router
1105 */
1106
1107 	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
1108 if (ipv6_hdr(skb)->version == 0) {
1109 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
1110
1111 if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
1112 nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
1113 } else {
1114 nlh->nlmsg_type = NLMSG_ERROR;
1115 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1116 skb_trim(skb, nlh->nlmsg_len);
1117 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
1118 }
1119 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
1120 } else
1121 ip6_mr_forward(net, mrt, skb, c);
1122 }
1123 }
1124
1125 /*
1126 * Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
1127 * expects the following bizarre scheme.
1128 *
1129 * Called under mrt_lock.
1130 */
1131
1132 static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
1133 mifi_t mifi, int assert)
1134 {
1135 struct sk_buff *skb;
1136 struct mrt6msg *msg;
1137 int ret;
1138
1139 #ifdef CONFIG_IPV6_PIMSM_V2
1140 if (assert == MRT6MSG_WHOLEPKT)
1141 skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1142 +sizeof(*msg));
1143 else
1144 #endif
1145 skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
1146
1147 if (!skb)
1148 return -ENOBUFS;
1149
1150 /* I suppose that internal messages
1151 * do not require checksums */
1152
1153 skb->ip_summed = CHECKSUM_UNNECESSARY;
1154
1155 #ifdef CONFIG_IPV6_PIMSM_V2
1156 if (assert == MRT6MSG_WHOLEPKT) {
1157 /* Ugly, but we have no choice with this interface.
1158 Duplicate old header, fix length etc.
1159 And all this only to mangle msg->im6_msgtype and
1160 to set msg->im6_mbz to "mbz" :-)
1161 */
1162 skb_push(skb, -skb_network_offset(pkt));
1163
1164 skb_push(skb, sizeof(*msg));
1165 skb_reset_transport_header(skb);
1166 msg = (struct mrt6msg *)skb_transport_header(skb);
1167 msg->im6_mbz = 0;
1168 msg->im6_msgtype = MRT6MSG_WHOLEPKT;
1169 msg->im6_mif = mrt->mroute_reg_vif_num;
1170 msg->im6_pad = 0;
1171 msg->im6_src = ipv6_hdr(pkt)->saddr;
1172 msg->im6_dst = ipv6_hdr(pkt)->daddr;
1173
1174 skb->ip_summed = CHECKSUM_UNNECESSARY;
1175 } else
1176 #endif
1177 {
1178 /*
1179 * Copy the IP header
1180 */
1181
1182 skb_put(skb, sizeof(struct ipv6hdr));
1183 skb_reset_network_header(skb);
1184 skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1185
1186 /*
1187 * Add our header
1188 */
1189 skb_put(skb, sizeof(*msg));
1190 skb_reset_transport_header(skb);
1191 msg = (struct mrt6msg *)skb_transport_header(skb);
1192
1193 msg->im6_mbz = 0;
1194 msg->im6_msgtype = assert;
1195 msg->im6_mif = mifi;
1196 msg->im6_pad = 0;
1197 msg->im6_src = ipv6_hdr(pkt)->saddr;
1198 msg->im6_dst = ipv6_hdr(pkt)->daddr;
1199
1200 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
1201 skb->ip_summed = CHECKSUM_UNNECESSARY;
1202 }
1203
1204 if (mrt->mroute6_sk == NULL) {
1205 kfree_skb(skb);
1206 return -EINVAL;
1207 }
1208
1209 /*
1210 * Deliver to user space multicast routing algorithms
1211 */
1212 ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
1213 if (ret < 0) {
1214 net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
1215 kfree_skb(skb);
1216 }
1217
1218 return ret;
1219 }
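/*
 * Illustrative userspace sketch (editorial addition, not part of this
 * file): how a daemon such as pim6sd consumes the reports queued by
 * ip6mr_cache_report() above. The upcall arrives as ordinary data on
 * the raw ICMPv6 socket that issued MRT6_INIT, carrying a struct
 * mrt6msg from <linux/mroute6.h>; im6_mbz == 0 distinguishes it from a
 * real IPv6 packet, whose version nibble would be 6. A minimal read
 * loop, assuming 'fd' was set up as in the sketch after
 * ip6_mroute_setsockopt() below:
 *
 *	char buf[8192];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	if (n >= (ssize_t)sizeof(struct mrt6msg)) {
 *		struct mrt6msg *msg = (struct mrt6msg *)buf;
 *		if (msg->im6_mbz == 0 &&
 *		    msg->im6_msgtype == MRT6MSG_NOCACHE) {
 *			// No (S,G) route yet: resolve msg->im6_src /
 *			// msg->im6_dst and install one via MRT6_ADD_MFC.
 *		}
 *	}
 */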
1220
1221 /*
1222  *	Queue a packet for resolution. It gets a locked cache entry!
1223 */
1224
1225 static int
1226 ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
1227 {
1228 bool found = false;
1229 int err;
1230 struct mfc6_cache *c;
1231
1232 spin_lock_bh(&mfc_unres_lock);
1233 list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
1234 if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
1235 ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1236 found = true;
1237 break;
1238 }
1239 }
1240
1241 if (!found) {
1242 /*
1243 * Create a new entry if allowable
1244 */
1245
1246 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
1247 (c = ip6mr_cache_alloc_unres()) == NULL) {
1248 spin_unlock_bh(&mfc_unres_lock);
1249
1250 kfree_skb(skb);
1251 return -ENOBUFS;
1252 }
1253
1254 /*
1255 * Fill in the new cache entry
1256 */
1257 c->mf6c_parent = -1;
1258 c->mf6c_origin = ipv6_hdr(skb)->saddr;
1259 c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1260
1261 /*
1262 * Reflect first query at pim6sd
1263 */
1264 err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
1265 if (err < 0) {
1266 			/* If the report failed, throw the cache entry
1267 out - Brad Parker
1268 */
1269 spin_unlock_bh(&mfc_unres_lock);
1270
1271 ip6mr_cache_free(c);
1272 kfree_skb(skb);
1273 return err;
1274 }
1275
1276 atomic_inc(&mrt->cache_resolve_queue_len);
1277 list_add(&c->list, &mrt->mfc6_unres_queue);
1278 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1279
1280 ipmr_do_expire_process(mrt);
1281 }
1282
1283 /*
1284 * See if we can append the packet
1285 */
1286 if (c->mfc_un.unres.unresolved.qlen > 3) {
1287 kfree_skb(skb);
1288 err = -ENOBUFS;
1289 } else {
1290 skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
1291 err = 0;
1292 }
1293
1294 spin_unlock_bh(&mfc_unres_lock);
1295 return err;
1296 }
1297
1298 /*
1299 * MFC6 cache manipulation by user space
1300 */
1301
1302 static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
1303 int parent)
1304 {
1305 int line;
1306 struct mfc6_cache *c, *next;
1307
1308 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1309
1310 list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
1311 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1312 ipv6_addr_equal(&c->mf6c_mcastgrp,
1313 &mfc->mf6cc_mcastgrp.sin6_addr) &&
1314 (parent == -1 || parent == c->mf6c_parent)) {
1315 write_lock_bh(&mrt_lock);
1316 list_del(&c->list);
1317 write_unlock_bh(&mrt_lock);
1318
1319 mr6_netlink_event(mrt, c, RTM_DELROUTE);
1320 ip6mr_cache_free(c);
1321 return 0;
1322 }
1323 }
1324 return -ENOENT;
1325 }
1326
1327 static int ip6mr_device_event(struct notifier_block *this,
1328 unsigned long event, void *ptr)
1329 {
1330 struct net_device *dev = ptr;
1331 struct net *net = dev_net(dev);
1332 struct mr6_table *mrt;
1333 struct mif_device *v;
1334 int ct;
1335 LIST_HEAD(list);
1336
1337 if (event != NETDEV_UNREGISTER)
1338 return NOTIFY_DONE;
1339
1340 ip6mr_for_each_table(mrt, net) {
1341 v = &mrt->vif6_table[0];
1342 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1343 if (v->dev == dev)
1344 mif6_delete(mrt, ct, &list);
1345 }
1346 }
1347 unregister_netdevice_many(&list);
1348
1349 return NOTIFY_DONE;
1350 }
1351
1352 static struct notifier_block ip6_mr_notifier = {
1353 .notifier_call = ip6mr_device_event
1354 };
1355
1356 /*
1357 * Setup for IP multicast routing
1358 */
1359
1360 static int __net_init ip6mr_net_init(struct net *net)
1361 {
1362 int err;
1363
1364 err = ip6mr_rules_init(net);
1365 if (err < 0)
1366 goto fail;
1367
1368 #ifdef CONFIG_PROC_FS
1369 err = -ENOMEM;
1370 if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
1371 goto proc_vif_fail;
1372 if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
1373 goto proc_cache_fail;
1374 #endif
1375
1376 return 0;
1377
1378 #ifdef CONFIG_PROC_FS
1379 proc_cache_fail:
1380 remove_proc_entry("ip6_mr_vif", net->proc_net);
1381 proc_vif_fail:
1382 ip6mr_rules_exit(net);
1383 #endif
1384 fail:
1385 return err;
1386 }
1387
1388 static void __net_exit ip6mr_net_exit(struct net *net)
1389 {
1390 #ifdef CONFIG_PROC_FS
1391 remove_proc_entry("ip6_mr_cache", net->proc_net);
1392 remove_proc_entry("ip6_mr_vif", net->proc_net);
1393 #endif
1394 ip6mr_rules_exit(net);
1395 }
1396
1397 static struct pernet_operations ip6mr_net_ops = {
1398 .init = ip6mr_net_init,
1399 .exit = ip6mr_net_exit,
1400 };
1401
1402 int __init ip6_mr_init(void)
1403 {
1404 int err;
1405
1406 mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1407 sizeof(struct mfc6_cache),
1408 0, SLAB_HWCACHE_ALIGN,
1409 NULL);
1410 if (!mrt_cachep)
1411 return -ENOMEM;
1412
1413 err = register_pernet_subsys(&ip6mr_net_ops);
1414 if (err)
1415 goto reg_pernet_fail;
1416
1417 err = register_netdevice_notifier(&ip6_mr_notifier);
1418 if (err)
1419 goto reg_notif_fail;
1420 #ifdef CONFIG_IPV6_PIMSM_V2
1421 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1422 pr_err("%s: can't add PIM protocol\n", __func__);
1423 err = -EAGAIN;
1424 goto add_proto_fail;
1425 }
1426 #endif
1427 rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
1428 ip6mr_rtm_dumproute, NULL);
1429 return 0;
1430 #ifdef CONFIG_IPV6_PIMSM_V2
1431 add_proto_fail:
1432 unregister_netdevice_notifier(&ip6_mr_notifier);
1433 #endif
1434 reg_notif_fail:
1435 unregister_pernet_subsys(&ip6mr_net_ops);
1436 reg_pernet_fail:
1437 kmem_cache_destroy(mrt_cachep);
1438 return err;
1439 }
1440
1441 void ip6_mr_cleanup(void)
1442 {
1443 unregister_netdevice_notifier(&ip6_mr_notifier);
1444 unregister_pernet_subsys(&ip6mr_net_ops);
1445 kmem_cache_destroy(mrt_cachep);
1446 }
1447
1448 static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1449 struct mf6cctl *mfc, int mrtsock, int parent)
1450 {
1451 bool found = false;
1452 int line;
1453 struct mfc6_cache *uc, *c;
1454 unsigned char ttls[MAXMIFS];
1455 int i;
1456
1457 if (mfc->mf6cc_parent >= MAXMIFS)
1458 return -ENFILE;
1459
1460 memset(ttls, 255, MAXMIFS);
1461 for (i = 0; i < MAXMIFS; i++) {
1462 if (IF_ISSET(i, &mfc->mf6cc_ifset))
1463 ttls[i] = 1;
1464
1465 }
1466
1467 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1468
1469 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
1470 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1471 ipv6_addr_equal(&c->mf6c_mcastgrp,
1472 &mfc->mf6cc_mcastgrp.sin6_addr) &&
1473 (parent == -1 || parent == mfc->mf6cc_parent)) {
1474 found = true;
1475 break;
1476 }
1477 }
1478
1479 if (found) {
1480 write_lock_bh(&mrt_lock);
1481 c->mf6c_parent = mfc->mf6cc_parent;
1482 ip6mr_update_thresholds(mrt, c, ttls);
1483 if (!mrtsock)
1484 c->mfc_flags |= MFC_STATIC;
1485 write_unlock_bh(&mrt_lock);
1486 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1487 return 0;
1488 }
1489
1490 if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
1491 !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1492 return -EINVAL;
1493
1494 c = ip6mr_cache_alloc();
1495 if (c == NULL)
1496 return -ENOMEM;
1497
1498 c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1499 c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1500 c->mf6c_parent = mfc->mf6cc_parent;
1501 ip6mr_update_thresholds(mrt, c, ttls);
1502 if (!mrtsock)
1503 c->mfc_flags |= MFC_STATIC;
1504
1505 write_lock_bh(&mrt_lock);
1506 list_add(&c->list, &mrt->mfc6_cache_array[line]);
1507 write_unlock_bh(&mrt_lock);
1508
1509 /*
1510 * Check to see if we resolved a queued list. If so we
1511 * need to send on the frames and tidy up.
1512 */
1513 found = false;
1514 spin_lock_bh(&mfc_unres_lock);
1515 list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
1516 if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
1517 ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
1518 list_del(&uc->list);
1519 atomic_dec(&mrt->cache_resolve_queue_len);
1520 found = true;
1521 break;
1522 }
1523 }
1524 if (list_empty(&mrt->mfc6_unres_queue))
1525 del_timer(&mrt->ipmr_expire_timer);
1526 spin_unlock_bh(&mfc_unres_lock);
1527
1528 if (found) {
1529 ip6mr_cache_resolve(net, mrt, uc, c);
1530 ip6mr_cache_free(uc);
1531 }
1532 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1533 return 0;
1534 }
1535
1536 /*
1537 * Close the multicast socket, and clear the vif tables etc
1538 */
1539
1540 static void mroute_clean_tables(struct mr6_table *mrt)
1541 {
1542 int i;
1543 LIST_HEAD(list);
1544 struct mfc6_cache *c, *next;
1545
1546 /*
1547 * Shut down all active vif entries
1548 */
1549 for (i = 0; i < mrt->maxvif; i++) {
1550 if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
1551 mif6_delete(mrt, i, &list);
1552 }
1553 unregister_netdevice_many(&list);
1554
1555 /*
1556 * Wipe the cache
1557 */
1558 for (i = 0; i < MFC6_LINES; i++) {
1559 list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
1560 if (c->mfc_flags & MFC_STATIC)
1561 continue;
1562 write_lock_bh(&mrt_lock);
1563 list_del(&c->list);
1564 write_unlock_bh(&mrt_lock);
1565
1566 mr6_netlink_event(mrt, c, RTM_DELROUTE);
1567 ip6mr_cache_free(c);
1568 }
1569 }
1570
1571 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1572 spin_lock_bh(&mfc_unres_lock);
1573 list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
1574 list_del(&c->list);
1575 mr6_netlink_event(mrt, c, RTM_DELROUTE);
1576 ip6mr_destroy_unres(mrt, c);
1577 }
1578 spin_unlock_bh(&mfc_unres_lock);
1579 }
1580 }
1581
1582 static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
1583 {
1584 int err = 0;
1585 struct net *net = sock_net(sk);
1586
1587 rtnl_lock();
1588 write_lock_bh(&mrt_lock);
1589 if (likely(mrt->mroute6_sk == NULL)) {
1590 mrt->mroute6_sk = sk;
1591 net->ipv6.devconf_all->mc_forwarding++;
1592 inet6_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
1593 NETCONFA_IFINDEX_ALL,
1594 net->ipv6.devconf_all);
1595 }
1596 else
1597 err = -EADDRINUSE;
1598 write_unlock_bh(&mrt_lock);
1599
1600 rtnl_unlock();
1601
1602 return err;
1603 }
1604
1605 int ip6mr_sk_done(struct sock *sk)
1606 {
1607 int err = -EACCES;
1608 struct net *net = sock_net(sk);
1609 struct mr6_table *mrt;
1610
1611 rtnl_lock();
1612 ip6mr_for_each_table(mrt, net) {
1613 if (sk == mrt->mroute6_sk) {
1614 write_lock_bh(&mrt_lock);
1615 mrt->mroute6_sk = NULL;
1616 net->ipv6.devconf_all->mc_forwarding--;
1617 inet6_netconf_notify_devconf(net,
1618 NETCONFA_MC_FORWARDING,
1619 NETCONFA_IFINDEX_ALL,
1620 net->ipv6.devconf_all);
1621 write_unlock_bh(&mrt_lock);
1622
1623 mroute_clean_tables(mrt);
1624 err = 0;
1625 break;
1626 }
1627 }
1628 rtnl_unlock();
1629
1630 return err;
1631 }
1632
1633 struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
1634 {
1635 struct mr6_table *mrt;
1636 struct flowi6 fl6 = {
1637 .flowi6_iif = skb->skb_iif,
1638 .flowi6_oif = skb->dev->ifindex,
1639 .flowi6_mark = skb->mark,
1640 };
1641
1642 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
1643 return NULL;
1644
1645 return mrt->mroute6_sk;
1646 }
1647
1648 /*
1649 * Socket options and virtual interface manipulation. The whole
1650 * virtual interface system is a complete heap, but unfortunately
1651 * that's how BSD mrouted happens to think. Maybe one day with a proper
1652 * MOSPF/PIM router set up we can clean this up.
1653 */
1654
1655 int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1656 {
1657 int ret, parent = 0;
1658 struct mif6ctl vif;
1659 struct mf6cctl mfc;
1660 mifi_t mifi;
1661 struct net *net = sock_net(sk);
1662 struct mr6_table *mrt;
1663
1664 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1665 if (mrt == NULL)
1666 return -ENOENT;
1667
1668 if (optname != MRT6_INIT) {
1669 if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
1670 return -EACCES;
1671 }
1672
1673 switch (optname) {
1674 case MRT6_INIT:
1675 if (sk->sk_type != SOCK_RAW ||
1676 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1677 return -EOPNOTSUPP;
1678 if (optlen < sizeof(int))
1679 return -EINVAL;
1680
1681 return ip6mr_sk_init(mrt, sk);
1682
1683 case MRT6_DONE:
1684 return ip6mr_sk_done(sk);
1685
1686 case MRT6_ADD_MIF:
1687 if (optlen < sizeof(vif))
1688 return -EINVAL;
1689 if (copy_from_user(&vif, optval, sizeof(vif)))
1690 return -EFAULT;
1691 if (vif.mif6c_mifi >= MAXMIFS)
1692 return -ENFILE;
1693 rtnl_lock();
1694 ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
1695 rtnl_unlock();
1696 return ret;
1697
1698 case MRT6_DEL_MIF:
1699 if (optlen < sizeof(mifi_t))
1700 return -EINVAL;
1701 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1702 return -EFAULT;
1703 rtnl_lock();
1704 ret = mif6_delete(mrt, mifi, NULL);
1705 rtnl_unlock();
1706 return ret;
1707
1708 /*
1709 * Manipulate the forwarding caches. These live
1710 * in a sort of kernel/user symbiosis.
1711 */
1712 case MRT6_ADD_MFC:
1713 case MRT6_DEL_MFC:
1714 		parent = -1; /* fall through */
1715 case MRT6_ADD_MFC_PROXY:
1716 case MRT6_DEL_MFC_PROXY:
1717 if (optlen < sizeof(mfc))
1718 return -EINVAL;
1719 if (copy_from_user(&mfc, optval, sizeof(mfc)))
1720 return -EFAULT;
1721 if (parent == 0)
1722 parent = mfc.mf6cc_parent;
1723 rtnl_lock();
1724 if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
1725 ret = ip6mr_mfc_delete(mrt, &mfc, parent);
1726 else
1727 ret = ip6mr_mfc_add(net, mrt, &mfc,
1728 sk == mrt->mroute6_sk, parent);
1729 rtnl_unlock();
1730 return ret;
1731
1732 /*
1733 	 *	Control the PIM assert behaviour (activating PIM also activates asserts)
1734 */
1735 case MRT6_ASSERT:
1736 {
1737 int v;
1738
1739 if (optlen != sizeof(v))
1740 return -EINVAL;
1741 if (get_user(v, (int __user *)optval))
1742 return -EFAULT;
1743 mrt->mroute_do_assert = v;
1744 return 0;
1745 }
1746
1747 #ifdef CONFIG_IPV6_PIMSM_V2
1748 case MRT6_PIM:
1749 {
1750 int v;
1751
1752 if (optlen != sizeof(v))
1753 return -EINVAL;
1754 if (get_user(v, (int __user *)optval))
1755 return -EFAULT;
1756 v = !!v;
1757 rtnl_lock();
1758 ret = 0;
1759 if (v != mrt->mroute_do_pim) {
1760 mrt->mroute_do_pim = v;
1761 mrt->mroute_do_assert = v;
1762 }
1763 rtnl_unlock();
1764 return ret;
1765 }
1766
1767 #endif
1768 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1769 case MRT6_TABLE:
1770 {
1771 u32 v;
1772
1773 if (optlen != sizeof(u32))
1774 return -EINVAL;
1775 if (get_user(v, (u32 __user *)optval))
1776 return -EFAULT;
1777 /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1778 if (v != RT_TABLE_DEFAULT && v >= 100000000)
1779 return -EINVAL;
1780 if (sk == mrt->mroute6_sk)
1781 return -EBUSY;
1782
1783 rtnl_lock();
1784 ret = 0;
1785 if (!ip6mr_new_table(net, v))
1786 ret = -ENOMEM;
1787 raw6_sk(sk)->ip6mr_table = v;
1788 rtnl_unlock();
1789 return ret;
1790 }
1791 #endif
1792 /*
1793 * Spurious command, or MRT6_VERSION which you cannot
1794 * set.
1795 */
1796 default:
1797 return -ENOPROTOOPT;
1798 }
1799 }
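/*
 * Illustrative userspace sketch (editorial addition, not part of this
 * file): exercising the options handled above. Everything except the
 * MRT6_* constants and the structs from <linux/mroute6.h> (mif6ctl,
 * mf6cctl, IF_SET) is this example's own choice of names and values:
 *
 *	int one = 1;
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *
 *	struct mif6ctl mif = {
 *		.mif6c_mifi = 0,			// mif index to assign
 *		.mif6c_flags = 0,			// plain device, cf. mif6_add()
 *		.mif6c_pifi = if_nametoindex("eth0"),	// physical ifindex
 *	};
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_ADD_MIF, &mif, sizeof(mif));
 *
 *	struct mf6cctl mfc = { .mf6cc_parent = 0 };	// input mif
 *	inet_pton(AF_INET6, "2001:db8::1", &mfc.mf6cc_origin.sin6_addr);
 *	inet_pton(AF_INET6, "ff0e::1", &mfc.mf6cc_mcastgrp.sin6_addr);
 *	IF_SET(1, &mfc.mf6cc_ifset);	// forward on mif 1 (ttl 1, cf. ip6mr_mfc_add)
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_ADD_MFC, &mfc, sizeof(mfc));
 */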
1800
1801 /*
1802 * Getsock opt support for the multicast routing system.
1803 */
1804
1805 int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1806 int __user *optlen)
1807 {
1808 int olr;
1809 int val;
1810 struct net *net = sock_net(sk);
1811 struct mr6_table *mrt;
1812
1813 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1814 if (mrt == NULL)
1815 return -ENOENT;
1816
1817 switch (optname) {
1818 case MRT6_VERSION:
1819 val = 0x0305;
1820 break;
1821 #ifdef CONFIG_IPV6_PIMSM_V2
1822 case MRT6_PIM:
1823 val = mrt->mroute_do_pim;
1824 break;
1825 #endif
1826 case MRT6_ASSERT:
1827 val = mrt->mroute_do_assert;
1828 break;
1829 default:
1830 return -ENOPROTOOPT;
1831 }
1832
1833 if (get_user(olr, optlen))
1834 return -EFAULT;
1835
1836 olr = min_t(int, olr, sizeof(int));
1837 if (olr < 0)
1838 return -EINVAL;
1839
1840 if (put_user(olr, optlen))
1841 return -EFAULT;
1842 if (copy_to_user(optval, &val, olr))
1843 return -EFAULT;
1844 return 0;
1845 }
1846
1847 /*
1848 * The IP multicast ioctl support routines.
1849 */
1850
1851 int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1852 {
1853 struct sioc_sg_req6 sr;
1854 struct sioc_mif_req6 vr;
1855 struct mif_device *vif;
1856 struct mfc6_cache *c;
1857 struct net *net = sock_net(sk);
1858 struct mr6_table *mrt;
1859
1860 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1861 if (mrt == NULL)
1862 return -ENOENT;
1863
1864 switch (cmd) {
1865 case SIOCGETMIFCNT_IN6:
1866 if (copy_from_user(&vr, arg, sizeof(vr)))
1867 return -EFAULT;
1868 if (vr.mifi >= mrt->maxvif)
1869 return -EINVAL;
1870 read_lock(&mrt_lock);
1871 vif = &mrt->vif6_table[vr.mifi];
1872 if (MIF_EXISTS(mrt, vr.mifi)) {
1873 vr.icount = vif->pkt_in;
1874 vr.ocount = vif->pkt_out;
1875 vr.ibytes = vif->bytes_in;
1876 vr.obytes = vif->bytes_out;
1877 read_unlock(&mrt_lock);
1878
1879 if (copy_to_user(arg, &vr, sizeof(vr)))
1880 return -EFAULT;
1881 return 0;
1882 }
1883 read_unlock(&mrt_lock);
1884 return -EADDRNOTAVAIL;
1885 case SIOCGETSGCNT_IN6:
1886 if (copy_from_user(&sr, arg, sizeof(sr)))
1887 return -EFAULT;
1888
1889 read_lock(&mrt_lock);
1890 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1891 if (c) {
1892 sr.pktcnt = c->mfc_un.res.pkt;
1893 sr.bytecnt = c->mfc_un.res.bytes;
1894 sr.wrong_if = c->mfc_un.res.wrong_if;
1895 read_unlock(&mrt_lock);
1896
1897 if (copy_to_user(arg, &sr, sizeof(sr)))
1898 return -EFAULT;
1899 return 0;
1900 }
1901 read_unlock(&mrt_lock);
1902 return -EADDRNOTAVAIL;
1903 default:
1904 return -ENOIOCTLCMD;
1905 }
1906 }
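/*
 * Illustrative userspace sketch (editorial addition, not part of this
 * file): querying the per-flow counters served by SIOCGETSGCNT_IN6
 * above, on the same mroute6 socket 'fd' as in the earlier sketches:
 *
 *	struct sioc_sg_req6 sr = { 0 };
 *	inet_pton(AF_INET6, "2001:db8::1", &sr.src.sin6_addr);
 *	inet_pton(AF_INET6, "ff0e::1", &sr.grp.sin6_addr);
 *	if (ioctl(fd, SIOCGETSGCNT_IN6, &sr) == 0)
 *		printf("pkts %lu bytes %lu wrong_if %lu\n",
 *		       sr.pktcnt, sr.bytecnt, sr.wrong_if);
 */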
1907
1908 #ifdef CONFIG_COMPAT
1909 struct compat_sioc_sg_req6 {
1910 struct sockaddr_in6 src;
1911 struct sockaddr_in6 grp;
1912 compat_ulong_t pktcnt;
1913 compat_ulong_t bytecnt;
1914 compat_ulong_t wrong_if;
1915 };
1916
1917 struct compat_sioc_mif_req6 {
1918 mifi_t mifi;
1919 compat_ulong_t icount;
1920 compat_ulong_t ocount;
1921 compat_ulong_t ibytes;
1922 compat_ulong_t obytes;
1923 };
1924
1925 int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1926 {
1927 struct compat_sioc_sg_req6 sr;
1928 struct compat_sioc_mif_req6 vr;
1929 struct mif_device *vif;
1930 struct mfc6_cache *c;
1931 struct net *net = sock_net(sk);
1932 struct mr6_table *mrt;
1933
1934 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1935 if (mrt == NULL)
1936 return -ENOENT;
1937
1938 switch (cmd) {
1939 case SIOCGETMIFCNT_IN6:
1940 if (copy_from_user(&vr, arg, sizeof(vr)))
1941 return -EFAULT;
1942 if (vr.mifi >= mrt->maxvif)
1943 return -EINVAL;
1944 read_lock(&mrt_lock);
1945 vif = &mrt->vif6_table[vr.mifi];
1946 if (MIF_EXISTS(mrt, vr.mifi)) {
1947 vr.icount = vif->pkt_in;
1948 vr.ocount = vif->pkt_out;
1949 vr.ibytes = vif->bytes_in;
1950 vr.obytes = vif->bytes_out;
1951 read_unlock(&mrt_lock);
1952
1953 if (copy_to_user(arg, &vr, sizeof(vr)))
1954 return -EFAULT;
1955 return 0;
1956 }
1957 read_unlock(&mrt_lock);
1958 return -EADDRNOTAVAIL;
1959 case SIOCGETSGCNT_IN6:
1960 if (copy_from_user(&sr, arg, sizeof(sr)))
1961 return -EFAULT;
1962
1963 read_lock(&mrt_lock);
1964 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1965 if (c) {
1966 sr.pktcnt = c->mfc_un.res.pkt;
1967 sr.bytecnt = c->mfc_un.res.bytes;
1968 sr.wrong_if = c->mfc_un.res.wrong_if;
1969 read_unlock(&mrt_lock);
1970
1971 if (copy_to_user(arg, &sr, sizeof(sr)))
1972 return -EFAULT;
1973 return 0;
1974 }
1975 read_unlock(&mrt_lock);
1976 return -EADDRNOTAVAIL;
1977 default:
1978 return -ENOIOCTLCMD;
1979 }
1980 }
1981 #endif
1982
1983 static inline int ip6mr_forward2_finish(struct sk_buff *skb)
1984 {
1985 IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
1986 IPSTATS_MIB_OUTFORWDATAGRAMS);
1987 IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
1988 IPSTATS_MIB_OUTOCTETS, skb->len);
1989 return dst_output(skb);
1990 }
1991
1992 /*
1993 * Processing handlers for ip6mr_forward
1994 */
1995
1996 static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
1997 struct sk_buff *skb, struct mfc6_cache *c, int vifi)
1998 {
1999 struct ipv6hdr *ipv6h;
2000 struct mif_device *vif = &mrt->vif6_table[vifi];
2001 struct net_device *dev;
2002 struct dst_entry *dst;
2003 struct flowi6 fl6;
2004
2005 if (vif->dev == NULL)
2006 goto out_free;
2007
2008 #ifdef CONFIG_IPV6_PIMSM_V2
2009 if (vif->flags & MIFF_REGISTER) {
2010 vif->pkt_out++;
2011 vif->bytes_out += skb->len;
2012 vif->dev->stats.tx_bytes += skb->len;
2013 vif->dev->stats.tx_packets++;
2014 ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
2015 goto out_free;
2016 }
2017 #endif
2018
2019 ipv6h = ipv6_hdr(skb);
2020
2021 fl6 = (struct flowi6) {
2022 .flowi6_oif = vif->link,
2023 .daddr = ipv6h->daddr,
2024 };
2025
2026 dst = ip6_route_output(net, NULL, &fl6);
2027 if (dst->error) {
2028 dst_release(dst);
2029 goto out_free;
2030 }
2031
2032 skb_dst_drop(skb);
2033 skb_dst_set(skb, dst);
2034
2035 	/*
2036 	 * RFC1584 teaches that a DVMRP/PIM router must deliver packets locally
2037 	 * not only before forwarding, but also after forwarding on all output
2038 	 * interfaces. The point is that if the mrouter runs a multicast
2039 	 * program, that program should receive packets regardless of the
2040 	 * interface on which it joined.
2041 	 * If we did not do this, the program would have to join on all
2042 	 * interfaces. On the other hand, a multihomed host (or router, but
2043 	 * not mrouter) cannot join on more than one interface - it would
2044 	 * result in receiving multiple copies of each packet.
2045 	 */
2046 dev = vif->dev;
2047 skb->dev = dev;
2048 vif->pkt_out++;
2049 vif->bytes_out += skb->len;
2050
2051 /* We are about to write */
2052 /* XXX: extension headers? */
2053 if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
2054 goto out_free;
2055
2056 ipv6h = ipv6_hdr(skb);
2057 ipv6h->hop_limit--;
2058
2059 IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2060
2061 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
2062 ip6mr_forward2_finish);
2063
2064 out_free:
2065 kfree_skb(skb);
2066 return 0;
2067 }
2068
2069 static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
2070 {
2071 int ct;
2072
2073 for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
2074 if (mrt->vif6_table[ct].dev == dev)
2075 break;
2076 }
2077 return ct;
2078 }
2079
2080 static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
2081 struct sk_buff *skb, struct mfc6_cache *cache)
2082 {
2083 int psend = -1;
2084 int vif, ct;
2085 int true_vifi = ip6mr_find_vif(mrt, skb->dev);
2086
2087 vif = cache->mf6c_parent;
2088 cache->mfc_un.res.pkt++;
2089 cache->mfc_un.res.bytes += skb->len;
2090
2091 if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
2092 struct mfc6_cache *cache_proxy;
2093
2094 		/* For an (*,G) entry, we only check that the incoming
2095 * interface is part of the static tree.
2096 */
2097 cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
2098 if (cache_proxy &&
2099 cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
2100 goto forward;
2101 }
2102
2103 /*
2104 * Wrong interface: drop packet and (maybe) send PIM assert.
2105 */
2106 if (mrt->vif6_table[vif].dev != skb->dev) {
2107 cache->mfc_un.res.wrong_if++;
2108
2109 if (true_vifi >= 0 && mrt->mroute_do_assert &&
2110 		    /* PIM-SM uses asserts when switching from the RPT to the SPT,
2111 		       so we cannot check that the packet arrived on an oif.
2112 		       That is bad, but otherwise we would need to move a pretty
2113 		       large chunk of pimd into the kernel. Ough... --ANK
2114 		    */
2115 (mrt->mroute_do_pim ||
2116 cache->mfc_un.res.ttls[true_vifi] < 255) &&
2117 time_after(jiffies,
2118 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
2119 cache->mfc_un.res.last_assert = jiffies;
2120 ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2121 }
2122 goto dont_forward;
2123 }
2124
2125 forward:
2126 mrt->vif6_table[vif].pkt_in++;
2127 mrt->vif6_table[vif].bytes_in += skb->len;
2128
2129 /*
2130 * Forward the frame
2131 */
2132 if (ipv6_addr_any(&cache->mf6c_origin) &&
2133 ipv6_addr_any(&cache->mf6c_mcastgrp)) {
2134 if (true_vifi >= 0 &&
2135 true_vifi != cache->mf6c_parent &&
2136 ipv6_hdr(skb)->hop_limit >
2137 cache->mfc_un.res.ttls[cache->mf6c_parent]) {
2138 /* It's an (*,*) entry and the packet is not coming from
2139 * the upstream: forward the packet to the upstream
2140 * only.
2141 */
2142 psend = cache->mf6c_parent;
2143 goto last_forward;
2144 }
2145 goto dont_forward;
2146 }
2147 for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
2148 /* For (*,G) entry, don't forward to the incoming interface */
2149 if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
2150 ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
2151 if (psend != -1) {
2152 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2153 if (skb2)
2154 ip6mr_forward2(net, mrt, skb2, cache, psend);
2155 }
2156 psend = ct;
2157 }
2158 }
2159 last_forward:
2160 if (psend != -1) {
2161 ip6mr_forward2(net, mrt, skb, cache, psend);
2162 return 0;
2163 }
2164
2165 dont_forward:
2166 kfree_skb(skb);
2167 return 0;
2168 }
2169
2170
2171 /*
2172 * Multicast packets for forwarding arrive here
2173 */
2174
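/*
 * Entry point from the IPv6 multicast receive path: look up the
 * mr6_table via fib rules, find the (S,G) or (*,G) cache entry for
 * the packet, and either forward it or queue it as unresolved so
 * that the multicast routing daemon can install a route.
 */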
int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
	if (cache == NULL) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt,
						     &ipv6_hdr(skb)->daddr,
						     vif);
	}

	/*
	 * No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		vif = ip6mr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}


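/*
 * Fill the RTA_IIF, RTA_MULTIPATH and RTA_MFC_STATS attributes for
 * one cache entry.  Returns -ENOENT for an unresolved entry (parent
 * mif out of range), -EMSGSIZE if the skb runs out of room, and 1 on
 * success.
 */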
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct nlattr *mp_attr;
	struct rta_mfc_stats mfcs;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mf6c_parent >= MAXMIFS)
		return -ENOENT;

	if (MIF_EXISTS(mrt, c->mf6c_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
		return -EMSGSIZE;
	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
	if (mp_attr == NULL)
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
			if (nhp == NULL) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}

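/*
 * Resolve multicast route information for an RTM_GETROUTE request on
 * the default table.  If no cache entry exists yet, a dummy IPv6
 * header carrying only the source and destination addresses is queued
 * as an unresolved packet, giving the routing daemon a chance to
 * install the route.
 */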
int ip6mr_get_route(struct net *net,
		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mr6_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
	if (!cache && skb->dev) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
						     vif);
	}

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		iph->saddr = rt->rt6i_src.addr;
		iph->daddr = rt->rt6i_dst.addr;

		err = ip6mr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;

	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}

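/*
 * Build a complete RTM_NEWROUTE/RTM_DELROUTE message (netlink header
 * plus route attributes) for one cache entry; shared by the notifier
 * and the dump path below.
 */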
static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
	rtm->rtm_dst_len  = 128;
	rtm->rtm_src_len  = 128;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) ||
	    nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp))
		goto nla_put_failure;
	err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

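/*
 * Worst-case netlink message size for one cache entry; unresolved
 * entries carry only the table, source and destination attributes.
 */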
static int mr6_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size(sizeof(struct rta_mfc_stats))
		      ;

	return len;
}

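/*
 * Notify RTNLGRP_IPV6_MROUTE listeners of a cache entry change;
 * allocation and fill failures are reported via rtnl_set_sk_err().
 */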
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}

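/*
 * Dump all multicast routes, resolved and unresolved, across every
 * table.  cb->args[] records the table/bucket/entry position so a
 * dump interrupted by a full skb can resume where it left off.
 */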
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr6_table *mrt;
	struct mfc6_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	read_lock(&mrt_lock);
	ip6mr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC6_LINES; h++) {
			list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ip6mr_fill_mroute(mrt, skb,
						      NETLINK_CB(cb->skb).portid,
						      cb->nlh->nlmsg_seq,
						      mfc, RTM_NEWROUTE) < 0)
					goto done;
next_entry:
				e++;
			}
			e = s_e = 0;
		}
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ip6mr_fill_mroute(mrt, skb,
					      NETLINK_CB(cb->skb).portid,
					      cb->nlh->nlmsg_seq,
					      mfc, RTM_NEWROUTE) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = s_e = 0;
		s_h = 0;
next_table:
		t++;
	}
done:
	read_unlock(&mrt_lock);

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}
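
/*
 * For reference, a minimal user-space sketch (not part of this file,
 * error handling omitted) of how this dump is triggered: send an
 * RTM_GETROUTE dump request with rtm_family set to RTNL_FAMILY_IP6MR
 * on a NETLINK_ROUTE socket, then read back the RTM_NEWROUTE replies.
 *
 *	struct {
 *		struct nlmsghdr	nlh;
 *		struct rtmsg	rtm;
 *	} req = {
 *		.nlh.nlmsg_len	 = NLMSG_LENGTH(sizeof(struct rtmsg)),
 *		.nlh.nlmsg_type	 = RTM_GETROUTE,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		.rtm.rtm_family	 = RTNL_FAMILY_IP6MR,
 *	};
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	send(fd, &req, req.nlh.nlmsg_len, 0);
 *	... then recv() and walk the NLM_F_MULTI reply with NLMSG_OK().
 */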