/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
struct mr6_table {
	struct list_head	list;
	struct sock		*mroute6_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc6_unres_queue;
	struct list_head	mfc6_cache_array[MFC6_LINES];
	struct mif_device	vif6_table[MAXMIFS];
	atomic_t		cache_resolve_queue_len;
	bool			mroute_do_assert;
#ifdef CONFIG_IPV6_PIMSM_V2
	int			mroute_reg_vif_num;
#endif
};

struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr6_table	*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);

static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm);
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr6_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (mrt == NULL)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}
static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};
static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr6_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	kfree(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}
static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr6_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
}
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
}
#endif
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;
	unsigned int i;

	mrt = ip6mr_get_table(net, id);
	if (mrt != NULL)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (mrt == NULL)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC6_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IPV6_PIMSM_V2
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
	return mrt;
}

static void ip6mr_free_table(struct mr6_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	kfree(mrt);
}
#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	struct list_head *cache;
	int ct;
};

static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr6_table *mrt = it->mrt;
	struct mfc6_cache *mfc;

	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	read_unlock(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc6_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
/*
 *	The /proc interfaces to multicast routing
 *	/proc/ip6_mr_cache /proc/ip6_mr_vif
 */
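
/*
 * For orientation, a purely illustrative example of what /proc/net/ip6_mr_vif
 * can look like on a router with one physical MIF and one PIM register MIF.
 * The counters and flag values below are made up; only the column layout
 * follows the seq_show handlers in this file:
 *
 *	# cat /proc/net/ip6_mr_vif
 *	Interface BytesIn PktsIn BytesOut PktsOut Flags
 *	 0 eth0       15360    120        0       0 00000
 *	 1 pim6reg        0      0    15360     120 00001
 */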
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	int ct;
};

static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	struct mr6_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}
static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}

static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&mrt_lock);
}
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr6_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - mrt->vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}
static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ip6mr_vif_open,
	.release = seq_release_net,
};
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc6_cache, list);

	if (it->cache == &mrt->mfc6_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);

	while (++it->ct < MFC6_LINES) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc6_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc6_unres_queue;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc6_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr6_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc6_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc6_cache_array[it->ct])
		read_unlock(&mrt_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif Pkts Bytes Wrong Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		struct mr6_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &mrt->mfc6_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq, " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.release = seq_release_net,
};
#endif
#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;
	skb->pkt_type = PACKET_HOST;

	skb_tunnel_rx(skb, reg_dev);

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}

static const struct inet6_protocol pim6_protocol = {
	.handler = pim6_rcv,
};
/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, reg_vif_setup);
	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
780 static int mif6_delete(struct mr6_table
*mrt
, int vifi
, int notify
,
781 struct list_head
*head
)
783 struct mif_device
*v
;
784 struct net_device
*dev
;
785 struct inet6_dev
*in6_dev
;
787 if (vifi
< 0 || vifi
>= mrt
->maxvif
)
788 return -EADDRNOTAVAIL
;
790 v
= &mrt
->vif6_table
[vifi
];
792 write_lock_bh(&mrt_lock
);
797 write_unlock_bh(&mrt_lock
);
798 return -EADDRNOTAVAIL
;
801 #ifdef CONFIG_IPV6_PIMSM_V2
802 if (vifi
== mrt
->mroute_reg_vif_num
)
803 mrt
->mroute_reg_vif_num
= -1;
806 if (vifi
+ 1 == mrt
->maxvif
) {
808 for (tmp
= vifi
- 1; tmp
>= 0; tmp
--) {
809 if (MIF_EXISTS(mrt
, tmp
))
812 mrt
->maxvif
= tmp
+ 1;
815 write_unlock_bh(&mrt_lock
);
817 dev_set_allmulti(dev
, -1);
819 in6_dev
= __in6_dev_get(dev
);
821 in6_dev
->cnf
.mc_forwarding
--;
822 inet6_netconf_notify_devconf(dev_net(dev
),
823 NETCONFA_MC_FORWARDING
,
824 dev
->ifindex
, &in6_dev
->cnf
);
827 if ((v
->flags
& MIFF_REGISTER
) && !notify
)
828 unregister_netdevice_queue(dev
, head
);
834 static inline void ip6mr_cache_free(struct mfc6_cache
*c
)
836 kmem_cache_free(mrt_cachep
, c
);
839 /* Destroy an unresolved cache entry, killing queued skbs
840 and reporting error to netlink readers.
843 static void ip6mr_destroy_unres(struct mr6_table
*mrt
, struct mfc6_cache
*c
)
845 struct net
*net
= read_pnet(&mrt
->net
);
848 atomic_dec(&mrt
->cache_resolve_queue_len
);
850 while((skb
= skb_dequeue(&c
->mfc_un
.unres
.unresolved
)) != NULL
) {
851 if (ipv6_hdr(skb
)->version
== 0) {
852 struct nlmsghdr
*nlh
= (struct nlmsghdr
*)skb_pull(skb
, sizeof(struct ipv6hdr
));
853 nlh
->nlmsg_type
= NLMSG_ERROR
;
854 nlh
->nlmsg_len
= nlmsg_msg_size(sizeof(struct nlmsgerr
));
855 skb_trim(skb
, nlh
->nlmsg_len
);
856 ((struct nlmsgerr
*)nlmsg_data(nlh
))->error
= -ETIMEDOUT
;
857 rtnl_unicast(skb
, net
, NETLINK_CB(skb
).portid
);
866 /* Timer process for all the unresolved queue. */
868 static void ipmr_do_expire_process(struct mr6_table
*mrt
)
870 unsigned long now
= jiffies
;
871 unsigned long expires
= 10 * HZ
;
872 struct mfc6_cache
*c
, *next
;
874 list_for_each_entry_safe(c
, next
, &mrt
->mfc6_unres_queue
, list
) {
875 if (time_after(c
->mfc_un
.unres
.expires
, now
)) {
877 unsigned long interval
= c
->mfc_un
.unres
.expires
- now
;
878 if (interval
< expires
)
884 mr6_netlink_event(mrt
, c
, RTM_DELROUTE
);
885 ip6mr_destroy_unres(mrt
, c
);
888 if (!list_empty(&mrt
->mfc6_unres_queue
))
889 mod_timer(&mrt
->ipmr_expire_timer
, jiffies
+ expires
);
892 static void ipmr_expire_process(unsigned long arg
)
894 struct mr6_table
*mrt
= (struct mr6_table
*)arg
;
896 if (!spin_trylock(&mfc_unres_lock
)) {
897 mod_timer(&mrt
->ipmr_expire_timer
, jiffies
+ 1);
901 if (!list_empty(&mrt
->mfc6_unres_queue
))
902 ipmr_do_expire_process(mrt
);
904 spin_unlock(&mfc_unres_lock
);
907 /* Fill oifs list. It is called under write locked mrt_lock. */
909 static void ip6mr_update_thresholds(struct mr6_table
*mrt
, struct mfc6_cache
*cache
,
914 cache
->mfc_un
.res
.minvif
= MAXMIFS
;
915 cache
->mfc_un
.res
.maxvif
= 0;
916 memset(cache
->mfc_un
.res
.ttls
, 255, MAXMIFS
);
918 for (vifi
= 0; vifi
< mrt
->maxvif
; vifi
++) {
919 if (MIF_EXISTS(mrt
, vifi
) &&
920 ttls
[vifi
] && ttls
[vifi
] < 255) {
921 cache
->mfc_un
.res
.ttls
[vifi
] = ttls
[vifi
];
922 if (cache
->mfc_un
.res
.minvif
> vifi
)
923 cache
->mfc_un
.res
.minvif
= vifi
;
924 if (cache
->mfc_un
.res
.maxvif
<= vifi
)
925 cache
->mfc_un
.res
.maxvif
= vifi
+ 1;
930 static int mif6_add(struct net
*net
, struct mr6_table
*mrt
,
931 struct mif6ctl
*vifc
, int mrtsock
)
933 int vifi
= vifc
->mif6c_mifi
;
934 struct mif_device
*v
= &mrt
->vif6_table
[vifi
];
935 struct net_device
*dev
;
936 struct inet6_dev
*in6_dev
;
940 if (MIF_EXISTS(mrt
, vifi
))
943 switch (vifc
->mif6c_flags
) {
944 #ifdef CONFIG_IPV6_PIMSM_V2
947 * Special Purpose VIF in PIM
948 * All the packets will be sent to the daemon
950 if (mrt
->mroute_reg_vif_num
>= 0)
952 dev
= ip6mr_reg_vif(net
, mrt
);
955 err
= dev_set_allmulti(dev
, 1);
957 unregister_netdevice(dev
);
964 dev
= dev_get_by_index(net
, vifc
->mif6c_pifi
);
966 return -EADDRNOTAVAIL
;
967 err
= dev_set_allmulti(dev
, 1);
977 in6_dev
= __in6_dev_get(dev
);
979 in6_dev
->cnf
.mc_forwarding
++;
980 inet6_netconf_notify_devconf(dev_net(dev
),
981 NETCONFA_MC_FORWARDING
,
982 dev
->ifindex
, &in6_dev
->cnf
);
986 * Fill in the VIF structures
988 v
->rate_limit
= vifc
->vifc_rate_limit
;
989 v
->flags
= vifc
->mif6c_flags
;
991 v
->flags
|= VIFF_STATIC
;
992 v
->threshold
= vifc
->vifc_threshold
;
997 v
->link
= dev
->ifindex
;
998 if (v
->flags
& MIFF_REGISTER
)
999 v
->link
= dev
->iflink
;
1001 /* And finish update writing critical data */
1002 write_lock_bh(&mrt_lock
);
1004 #ifdef CONFIG_IPV6_PIMSM_V2
1005 if (v
->flags
& MIFF_REGISTER
)
1006 mrt
->mroute_reg_vif_num
= vifi
;
1008 if (vifi
+ 1 > mrt
->maxvif
)
1009 mrt
->maxvif
= vifi
+ 1;
1010 write_unlock_bh(&mrt_lock
);
1014 static struct mfc6_cache
*ip6mr_cache_find(struct mr6_table
*mrt
,
1015 const struct in6_addr
*origin
,
1016 const struct in6_addr
*mcastgrp
)
1018 int line
= MFC6_HASH(mcastgrp
, origin
);
1019 struct mfc6_cache
*c
;
1021 list_for_each_entry(c
, &mrt
->mfc6_cache_array
[line
], list
) {
1022 if (ipv6_addr_equal(&c
->mf6c_origin
, origin
) &&
1023 ipv6_addr_equal(&c
->mf6c_mcastgrp
, mcastgrp
))
1029 /* Look for a (*,*,oif) entry */
1030 static struct mfc6_cache
*ip6mr_cache_find_any_parent(struct mr6_table
*mrt
,
1033 int line
= MFC6_HASH(&in6addr_any
, &in6addr_any
);
1034 struct mfc6_cache
*c
;
1036 list_for_each_entry(c
, &mrt
->mfc6_cache_array
[line
], list
)
1037 if (ipv6_addr_any(&c
->mf6c_origin
) &&
1038 ipv6_addr_any(&c
->mf6c_mcastgrp
) &&
1039 (c
->mfc_un
.res
.ttls
[mifi
] < 255))
1045 /* Look for a (*,G) entry */
1046 static struct mfc6_cache
*ip6mr_cache_find_any(struct mr6_table
*mrt
,
1047 struct in6_addr
*mcastgrp
,
1050 int line
= MFC6_HASH(mcastgrp
, &in6addr_any
);
1051 struct mfc6_cache
*c
, *proxy
;
1053 if (ipv6_addr_any(mcastgrp
))
1056 list_for_each_entry(c
, &mrt
->mfc6_cache_array
[line
], list
)
1057 if (ipv6_addr_any(&c
->mf6c_origin
) &&
1058 ipv6_addr_equal(&c
->mf6c_mcastgrp
, mcastgrp
)) {
1059 if (c
->mfc_un
.res
.ttls
[mifi
] < 255)
1062 /* It's ok if the mifi is part of the static tree */
1063 proxy
= ip6mr_cache_find_any_parent(mrt
,
1065 if (proxy
&& proxy
->mfc_un
.res
.ttls
[mifi
] < 255)
1070 return ip6mr_cache_find_any_parent(mrt
, mifi
);
1074 * Allocate a multicast cache entry
1076 static struct mfc6_cache
*ip6mr_cache_alloc(void)
1078 struct mfc6_cache
*c
= kmem_cache_zalloc(mrt_cachep
, GFP_KERNEL
);
1081 c
->mfc_un
.res
.last_assert
= jiffies
- MFC_ASSERT_THRESH
- 1;
1082 c
->mfc_un
.res
.minvif
= MAXMIFS
;
1086 static struct mfc6_cache
*ip6mr_cache_alloc_unres(void)
1088 struct mfc6_cache
*c
= kmem_cache_zalloc(mrt_cachep
, GFP_ATOMIC
);
1091 skb_queue_head_init(&c
->mfc_un
.unres
.unresolved
);
1092 c
->mfc_un
.unres
.expires
= jiffies
+ 10 * HZ
;
1097 * A cache entry has gone into a resolved state from queued
1100 static void ip6mr_cache_resolve(struct net
*net
, struct mr6_table
*mrt
,
1101 struct mfc6_cache
*uc
, struct mfc6_cache
*c
)
1103 struct sk_buff
*skb
;
1106 * Play the pending entries through our router
1109 while((skb
= __skb_dequeue(&uc
->mfc_un
.unres
.unresolved
))) {
1110 if (ipv6_hdr(skb
)->version
== 0) {
1111 struct nlmsghdr
*nlh
= (struct nlmsghdr
*)skb_pull(skb
, sizeof(struct ipv6hdr
));
1113 if (__ip6mr_fill_mroute(mrt
, skb
, c
, nlmsg_data(nlh
)) > 0) {
1114 nlh
->nlmsg_len
= skb_tail_pointer(skb
) - (u8
*)nlh
;
1116 nlh
->nlmsg_type
= NLMSG_ERROR
;
1117 nlh
->nlmsg_len
= nlmsg_msg_size(sizeof(struct nlmsgerr
));
1118 skb_trim(skb
, nlh
->nlmsg_len
);
1119 ((struct nlmsgerr
*)nlmsg_data(nlh
))->error
= -EMSGSIZE
;
1121 rtnl_unicast(skb
, net
, NETLINK_CB(skb
).portid
);
1123 ip6_mr_forward(net
, mrt
, skb
, c
);
/*
 *	Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */
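
/*
 * For reference, the receiving side of this scheme (a hedged sketch, not code
 * taken from pim6sd): the daemon read()s its MRT6_INIT ICMPv6 raw socket and,
 * when im6_mbz is zero, treats the buffer as a struct mrt6msg upcall rather
 * than an ICMPv6 packet. handle_nocache()/handle_wrongmif() below are
 * hypothetical helpers, not part of any real daemon:
 *
 *	unsigned char buf[8192];
 *	struct mrt6msg *m = (struct mrt6msg *)buf;
 *	ssize_t n = read(mrt6_sock, buf, sizeof(buf));
 *
 *	if (n >= (ssize_t)sizeof(*m) && m->im6_mbz == 0) {
 *		switch (m->im6_msgtype) {
 *		case MRT6MSG_NOCACHE:
 *			handle_nocache(m->im6_mif, &m->im6_src, &m->im6_dst);
 *			break;
 *		case MRT6MSG_WRONGMIF:
 *			handle_wrongmif(m->im6_mif, &m->im6_src, &m->im6_dst);
 *			break;
 *		}
 *	}
 */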
1134 static int ip6mr_cache_report(struct mr6_table
*mrt
, struct sk_buff
*pkt
,
1135 mifi_t mifi
, int assert)
1137 struct sk_buff
*skb
;
1138 struct mrt6msg
*msg
;
1141 #ifdef CONFIG_IPV6_PIMSM_V2
1142 if (assert == MRT6MSG_WHOLEPKT
)
1143 skb
= skb_realloc_headroom(pkt
, -skb_network_offset(pkt
)
1147 skb
= alloc_skb(sizeof(struct ipv6hdr
) + sizeof(*msg
), GFP_ATOMIC
);
1152 /* I suppose that internal messages
1153 * do not require checksums */
1155 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1157 #ifdef CONFIG_IPV6_PIMSM_V2
1158 if (assert == MRT6MSG_WHOLEPKT
) {
1159 /* Ugly, but we have no choice with this interface.
1160 Duplicate old header, fix length etc.
1161 And all this only to mangle msg->im6_msgtype and
1162 to set msg->im6_mbz to "mbz" :-)
1164 skb_push(skb
, -skb_network_offset(pkt
));
1166 skb_push(skb
, sizeof(*msg
));
1167 skb_reset_transport_header(skb
);
1168 msg
= (struct mrt6msg
*)skb_transport_header(skb
);
1170 msg
->im6_msgtype
= MRT6MSG_WHOLEPKT
;
1171 msg
->im6_mif
= mrt
->mroute_reg_vif_num
;
1173 msg
->im6_src
= ipv6_hdr(pkt
)->saddr
;
1174 msg
->im6_dst
= ipv6_hdr(pkt
)->daddr
;
1176 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1181 * Copy the IP header
1184 skb_put(skb
, sizeof(struct ipv6hdr
));
1185 skb_reset_network_header(skb
);
1186 skb_copy_to_linear_data(skb
, ipv6_hdr(pkt
), sizeof(struct ipv6hdr
));
1191 skb_put(skb
, sizeof(*msg
));
1192 skb_reset_transport_header(skb
);
1193 msg
= (struct mrt6msg
*)skb_transport_header(skb
);
1196 msg
->im6_msgtype
= assert;
1197 msg
->im6_mif
= mifi
;
1199 msg
->im6_src
= ipv6_hdr(pkt
)->saddr
;
1200 msg
->im6_dst
= ipv6_hdr(pkt
)->daddr
;
1202 skb_dst_set(skb
, dst_clone(skb_dst(pkt
)));
1203 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1206 if (mrt
->mroute6_sk
== NULL
) {
1212 * Deliver to user space multicast routing algorithms
1214 ret
= sock_queue_rcv_skb(mrt
->mroute6_sk
, skb
);
1216 net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
1224 * Queue a packet for resolution. It gets locked cache entry!
1228 ip6mr_cache_unresolved(struct mr6_table
*mrt
, mifi_t mifi
, struct sk_buff
*skb
)
1232 struct mfc6_cache
*c
;
1234 spin_lock_bh(&mfc_unres_lock
);
1235 list_for_each_entry(c
, &mrt
->mfc6_unres_queue
, list
) {
1236 if (ipv6_addr_equal(&c
->mf6c_mcastgrp
, &ipv6_hdr(skb
)->daddr
) &&
1237 ipv6_addr_equal(&c
->mf6c_origin
, &ipv6_hdr(skb
)->saddr
)) {
1245 * Create a new entry if allowable
1248 if (atomic_read(&mrt
->cache_resolve_queue_len
) >= 10 ||
1249 (c
= ip6mr_cache_alloc_unres()) == NULL
) {
1250 spin_unlock_bh(&mfc_unres_lock
);
1257 * Fill in the new cache entry
1259 c
->mf6c_parent
= -1;
1260 c
->mf6c_origin
= ipv6_hdr(skb
)->saddr
;
1261 c
->mf6c_mcastgrp
= ipv6_hdr(skb
)->daddr
;
1264 * Reflect first query at pim6sd
1266 err
= ip6mr_cache_report(mrt
, skb
, mifi
, MRT6MSG_NOCACHE
);
1268 /* If the report failed throw the cache entry
1271 spin_unlock_bh(&mfc_unres_lock
);
1273 ip6mr_cache_free(c
);
1278 atomic_inc(&mrt
->cache_resolve_queue_len
);
1279 list_add(&c
->list
, &mrt
->mfc6_unres_queue
);
1280 mr6_netlink_event(mrt
, c
, RTM_NEWROUTE
);
1282 ipmr_do_expire_process(mrt
);
1286 * See if we can append the packet
1288 if (c
->mfc_un
.unres
.unresolved
.qlen
> 3) {
1292 skb_queue_tail(&c
->mfc_un
.unres
.unresolved
, skb
);
1296 spin_unlock_bh(&mfc_unres_lock
);
1301 * MFC6 cache manipulation by user space
1304 static int ip6mr_mfc_delete(struct mr6_table
*mrt
, struct mf6cctl
*mfc
,
1308 struct mfc6_cache
*c
, *next
;
1310 line
= MFC6_HASH(&mfc
->mf6cc_mcastgrp
.sin6_addr
, &mfc
->mf6cc_origin
.sin6_addr
);
1312 list_for_each_entry_safe(c
, next
, &mrt
->mfc6_cache_array
[line
], list
) {
1313 if (ipv6_addr_equal(&c
->mf6c_origin
, &mfc
->mf6cc_origin
.sin6_addr
) &&
1314 ipv6_addr_equal(&c
->mf6c_mcastgrp
,
1315 &mfc
->mf6cc_mcastgrp
.sin6_addr
) &&
1316 (parent
== -1 || parent
== c
->mf6c_parent
)) {
1317 write_lock_bh(&mrt_lock
);
1319 write_unlock_bh(&mrt_lock
);
1321 mr6_netlink_event(mrt
, c
, RTM_DELROUTE
);
1322 ip6mr_cache_free(c
);
1329 static int ip6mr_device_event(struct notifier_block
*this,
1330 unsigned long event
, void *ptr
)
1332 struct net_device
*dev
= ptr
;
1333 struct net
*net
= dev_net(dev
);
1334 struct mr6_table
*mrt
;
1335 struct mif_device
*v
;
1338 if (event
!= NETDEV_UNREGISTER
)
1341 ip6mr_for_each_table(mrt
, net
) {
1342 v
= &mrt
->vif6_table
[0];
1343 for (ct
= 0; ct
< mrt
->maxvif
; ct
++, v
++) {
1345 mif6_delete(mrt
, ct
, 1, NULL
);
1352 static struct notifier_block ip6_mr_notifier
= {
1353 .notifier_call
= ip6mr_device_event
1357 * Setup for IP multicast routing
1360 static int __net_init
ip6mr_net_init(struct net
*net
)
1364 err
= ip6mr_rules_init(net
);
1368 #ifdef CONFIG_PROC_FS
1370 if (!proc_create("ip6_mr_vif", 0, net
->proc_net
, &ip6mr_vif_fops
))
1372 if (!proc_create("ip6_mr_cache", 0, net
->proc_net
, &ip6mr_mfc_fops
))
1373 goto proc_cache_fail
;
1378 #ifdef CONFIG_PROC_FS
1380 remove_proc_entry("ip6_mr_vif", net
->proc_net
);
1382 ip6mr_rules_exit(net
);
1388 static void __net_exit
ip6mr_net_exit(struct net
*net
)
1390 #ifdef CONFIG_PROC_FS
1391 remove_proc_entry("ip6_mr_cache", net
->proc_net
);
1392 remove_proc_entry("ip6_mr_vif", net
->proc_net
);
1394 ip6mr_rules_exit(net
);
1397 static struct pernet_operations ip6mr_net_ops
= {
1398 .init
= ip6mr_net_init
,
1399 .exit
= ip6mr_net_exit
,
1402 int __init
ip6_mr_init(void)
1406 mrt_cachep
= kmem_cache_create("ip6_mrt_cache",
1407 sizeof(struct mfc6_cache
),
1408 0, SLAB_HWCACHE_ALIGN
,
1413 err
= register_pernet_subsys(&ip6mr_net_ops
);
1415 goto reg_pernet_fail
;
1417 err
= register_netdevice_notifier(&ip6_mr_notifier
);
1419 goto reg_notif_fail
;
1420 #ifdef CONFIG_IPV6_PIMSM_V2
1421 if (inet6_add_protocol(&pim6_protocol
, IPPROTO_PIM
) < 0) {
1422 pr_err("%s: can't add PIM protocol\n", __func__
);
1424 goto add_proto_fail
;
1427 rtnl_register(RTNL_FAMILY_IP6MR
, RTM_GETROUTE
, NULL
,
1428 ip6mr_rtm_dumproute
, NULL
);
1430 #ifdef CONFIG_IPV6_PIMSM_V2
1432 unregister_netdevice_notifier(&ip6_mr_notifier
);
1435 unregister_pernet_subsys(&ip6mr_net_ops
);
1437 kmem_cache_destroy(mrt_cachep
);
1441 void ip6_mr_cleanup(void)
1443 unregister_netdevice_notifier(&ip6_mr_notifier
);
1444 unregister_pernet_subsys(&ip6mr_net_ops
);
1445 kmem_cache_destroy(mrt_cachep
);
1448 static int ip6mr_mfc_add(struct net
*net
, struct mr6_table
*mrt
,
1449 struct mf6cctl
*mfc
, int mrtsock
, int parent
)
1453 struct mfc6_cache
*uc
, *c
;
1454 unsigned char ttls
[MAXMIFS
];
1457 if (mfc
->mf6cc_parent
>= MAXMIFS
)
1460 memset(ttls
, 255, MAXMIFS
);
1461 for (i
= 0; i
< MAXMIFS
; i
++) {
1462 if (IF_ISSET(i
, &mfc
->mf6cc_ifset
))
1467 line
= MFC6_HASH(&mfc
->mf6cc_mcastgrp
.sin6_addr
, &mfc
->mf6cc_origin
.sin6_addr
);
1469 list_for_each_entry(c
, &mrt
->mfc6_cache_array
[line
], list
) {
1470 if (ipv6_addr_equal(&c
->mf6c_origin
, &mfc
->mf6cc_origin
.sin6_addr
) &&
1471 ipv6_addr_equal(&c
->mf6c_mcastgrp
,
1472 &mfc
->mf6cc_mcastgrp
.sin6_addr
) &&
1473 (parent
== -1 || parent
== mfc
->mf6cc_parent
)) {
1480 write_lock_bh(&mrt_lock
);
1481 c
->mf6c_parent
= mfc
->mf6cc_parent
;
1482 ip6mr_update_thresholds(mrt
, c
, ttls
);
1484 c
->mfc_flags
|= MFC_STATIC
;
1485 write_unlock_bh(&mrt_lock
);
1486 mr6_netlink_event(mrt
, c
, RTM_NEWROUTE
);
1490 if (!ipv6_addr_any(&mfc
->mf6cc_mcastgrp
.sin6_addr
) &&
1491 !ipv6_addr_is_multicast(&mfc
->mf6cc_mcastgrp
.sin6_addr
))
1494 c
= ip6mr_cache_alloc();
1498 c
->mf6c_origin
= mfc
->mf6cc_origin
.sin6_addr
;
1499 c
->mf6c_mcastgrp
= mfc
->mf6cc_mcastgrp
.sin6_addr
;
1500 c
->mf6c_parent
= mfc
->mf6cc_parent
;
1501 ip6mr_update_thresholds(mrt
, c
, ttls
);
1503 c
->mfc_flags
|= MFC_STATIC
;
1505 write_lock_bh(&mrt_lock
);
1506 list_add(&c
->list
, &mrt
->mfc6_cache_array
[line
]);
1507 write_unlock_bh(&mrt_lock
);
1510 * Check to see if we resolved a queued list. If so we
1511 * need to send on the frames and tidy up.
1514 spin_lock_bh(&mfc_unres_lock
);
1515 list_for_each_entry(uc
, &mrt
->mfc6_unres_queue
, list
) {
1516 if (ipv6_addr_equal(&uc
->mf6c_origin
, &c
->mf6c_origin
) &&
1517 ipv6_addr_equal(&uc
->mf6c_mcastgrp
, &c
->mf6c_mcastgrp
)) {
1518 list_del(&uc
->list
);
1519 atomic_dec(&mrt
->cache_resolve_queue_len
);
1524 if (list_empty(&mrt
->mfc6_unres_queue
))
1525 del_timer(&mrt
->ipmr_expire_timer
);
1526 spin_unlock_bh(&mfc_unres_lock
);
1529 ip6mr_cache_resolve(net
, mrt
, uc
, c
);
1530 ip6mr_cache_free(uc
);
1532 mr6_netlink_event(mrt
, c
, RTM_NEWROUTE
);
1537 * Close the multicast socket, and clear the vif tables etc
1540 static void mroute_clean_tables(struct mr6_table
*mrt
, bool all
)
1544 struct mfc6_cache
*c
, *next
;
1547 * Shut down all active vif entries
1549 for (i
= 0; i
< mrt
->maxvif
; i
++) {
1550 if (!all
&& (mrt
->vif6_table
[i
].flags
& VIFF_STATIC
))
1552 mif6_delete(mrt
, i
, 0, &list
);
1554 unregister_netdevice_many(&list
);
1559 for (i
= 0; i
< MFC6_LINES
; i
++) {
1560 list_for_each_entry_safe(c
, next
, &mrt
->mfc6_cache_array
[i
], list
) {
1561 if (!all
&& (c
->mfc_flags
& MFC_STATIC
))
1563 write_lock_bh(&mrt_lock
);
1565 write_unlock_bh(&mrt_lock
);
1567 mr6_netlink_event(mrt
, c
, RTM_DELROUTE
);
1568 ip6mr_cache_free(c
);
1572 if (atomic_read(&mrt
->cache_resolve_queue_len
) != 0) {
1573 spin_lock_bh(&mfc_unres_lock
);
1574 list_for_each_entry_safe(c
, next
, &mrt
->mfc6_unres_queue
, list
) {
1576 mr6_netlink_event(mrt
, c
, RTM_DELROUTE
);
1577 ip6mr_destroy_unres(mrt
, c
);
1579 spin_unlock_bh(&mfc_unres_lock
);
1583 static int ip6mr_sk_init(struct mr6_table
*mrt
, struct sock
*sk
)
1586 struct net
*net
= sock_net(sk
);
1589 write_lock_bh(&mrt_lock
);
1590 if (likely(mrt
->mroute6_sk
== NULL
)) {
1591 mrt
->mroute6_sk
= sk
;
1592 net
->ipv6
.devconf_all
->mc_forwarding
++;
1593 inet6_netconf_notify_devconf(net
, NETCONFA_MC_FORWARDING
,
1594 NETCONFA_IFINDEX_ALL
,
1595 net
->ipv6
.devconf_all
);
1599 write_unlock_bh(&mrt_lock
);
1606 int ip6mr_sk_done(struct sock
*sk
)
1609 struct net
*net
= sock_net(sk
);
1610 struct mr6_table
*mrt
;
1613 ip6mr_for_each_table(mrt
, net
) {
1614 if (sk
== mrt
->mroute6_sk
) {
1615 write_lock_bh(&mrt_lock
);
1616 mrt
->mroute6_sk
= NULL
;
1617 net
->ipv6
.devconf_all
->mc_forwarding
--;
1618 inet6_netconf_notify_devconf(net
,
1619 NETCONFA_MC_FORWARDING
,
1620 NETCONFA_IFINDEX_ALL
,
1621 net
->ipv6
.devconf_all
);
1622 write_unlock_bh(&mrt_lock
);
1624 mroute_clean_tables(mrt
, false);
1634 struct sock
*mroute6_socket(struct net
*net
, struct sk_buff
*skb
)
1636 struct mr6_table
*mrt
;
1637 struct flowi6 fl6
= {
1638 .flowi6_iif
= skb
->skb_iif
,
1639 .flowi6_oif
= skb
->dev
->ifindex
,
1640 .flowi6_mark
= skb
->mark
,
1643 if (ip6mr_fib_lookup(net
, &fl6
, &mrt
) < 0)
1646 return mrt
->mroute6_sk
;
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
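
/*
 * A minimal userspace sketch of the call sequence these options expect,
 * purely for orientation (error handling omitted, interface and addresses
 * made up; it assumes a second MIF 1 has been added the same way as MIF 0).
 * Real daemons such as pim6sd do the same through their own wrappers:
 *
 *	int one = 1;
 *	struct mif6ctl mif = {
 *		.mif6c_mifi = 0,
 *		.mif6c_pifi = if_nametoindex("eth0"),
 *	};
 *	struct mf6cctl mfc = { .mf6cc_parent = 0 };
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mif, sizeof(mif));
 *
 *	inet_pton(AF_INET6, "2001:db8::1", &mfc.mf6cc_origin.sin6_addr);
 *	inet_pton(AF_INET6, "ff3e::1234", &mfc.mf6cc_mcastgrp.sin6_addr);
 *	IF_SET(1, &mfc.mf6cc_ifset);
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MFC, &mfc, sizeof(mfc));
 *
 *	setsockopt(s, IPPROTO_IPV6, MRT6_DONE, NULL, 0);
 */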
1656 int ip6_mroute_setsockopt(struct sock
*sk
, int optname
, char __user
*optval
, unsigned int optlen
)
1658 int ret
, parent
= 0;
1662 struct net
*net
= sock_net(sk
);
1663 struct mr6_table
*mrt
;
1665 mrt
= ip6mr_get_table(net
, raw6_sk(sk
)->ip6mr_table
? : RT6_TABLE_DFLT
);
1669 if (optname
!= MRT6_INIT
) {
1670 if (sk
!= mrt
->mroute6_sk
&& !ns_capable(net
->user_ns
, CAP_NET_ADMIN
))
1676 if (sk
->sk_type
!= SOCK_RAW
||
1677 inet_sk(sk
)->inet_num
!= IPPROTO_ICMPV6
)
1679 if (optlen
< sizeof(int))
1682 return ip6mr_sk_init(mrt
, sk
);
1685 return ip6mr_sk_done(sk
);
1688 if (optlen
< sizeof(vif
))
1690 if (copy_from_user(&vif
, optval
, sizeof(vif
)))
1692 if (vif
.mif6c_mifi
>= MAXMIFS
)
1695 ret
= mif6_add(net
, mrt
, &vif
, sk
== mrt
->mroute6_sk
);
1700 if (optlen
< sizeof(mifi_t
))
1702 if (copy_from_user(&mifi
, optval
, sizeof(mifi_t
)))
1705 ret
= mif6_delete(mrt
, mifi
, 0, NULL
);
1710 * Manipulate the forwarding caches. These live
1711 * in a sort of kernel/user symbiosis.
1716 case MRT6_ADD_MFC_PROXY
:
1717 case MRT6_DEL_MFC_PROXY
:
1718 if (optlen
< sizeof(mfc
))
1720 if (copy_from_user(&mfc
, optval
, sizeof(mfc
)))
1723 parent
= mfc
.mf6cc_parent
;
1725 if (optname
== MRT6_DEL_MFC
|| optname
== MRT6_DEL_MFC_PROXY
)
1726 ret
= ip6mr_mfc_delete(mrt
, &mfc
, parent
);
1728 ret
= ip6mr_mfc_add(net
, mrt
, &mfc
,
1729 sk
== mrt
->mroute6_sk
, parent
);
1734 * Control PIM assert (to activate pim will activate assert)
1740 if (optlen
!= sizeof(v
))
1742 if (get_user(v
, (int __user
*)optval
))
1744 mrt
->mroute_do_assert
= v
;
1748 #ifdef CONFIG_IPV6_PIMSM_V2
1753 if (optlen
!= sizeof(v
))
1755 if (get_user(v
, (int __user
*)optval
))
1760 if (v
!= mrt
->mroute_do_pim
) {
1761 mrt
->mroute_do_pim
= v
;
1762 mrt
->mroute_do_assert
= v
;
1769 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1774 if (optlen
!= sizeof(u32
))
1776 if (get_user(v
, (u32 __user
*)optval
))
1778 /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1779 if (v
!= RT_TABLE_DEFAULT
&& v
>= 100000000)
1781 if (sk
== mrt
->mroute6_sk
)
1786 if (!ip6mr_new_table(net
, v
))
1788 raw6_sk(sk
)->ip6mr_table
= v
;
1794 * Spurious command, or MRT6_VERSION which you cannot
1798 return -ENOPROTOOPT
;
1803 * Getsock opt support for the multicast routing system.
1806 int ip6_mroute_getsockopt(struct sock
*sk
, int optname
, char __user
*optval
,
1811 struct net
*net
= sock_net(sk
);
1812 struct mr6_table
*mrt
;
1814 mrt
= ip6mr_get_table(net
, raw6_sk(sk
)->ip6mr_table
? : RT6_TABLE_DFLT
);
1822 #ifdef CONFIG_IPV6_PIMSM_V2
1824 val
= mrt
->mroute_do_pim
;
1828 val
= mrt
->mroute_do_assert
;
1831 return -ENOPROTOOPT
;
1834 if (get_user(olr
, optlen
))
1837 olr
= min_t(int, olr
, sizeof(int));
1841 if (put_user(olr
, optlen
))
1843 if (copy_to_user(optval
, &val
, olr
))
1849 * The IP multicast ioctl support routines.
1852 int ip6mr_ioctl(struct sock
*sk
, int cmd
, void __user
*arg
)
1854 struct sioc_sg_req6 sr
;
1855 struct sioc_mif_req6 vr
;
1856 struct mif_device
*vif
;
1857 struct mfc6_cache
*c
;
1858 struct net
*net
= sock_net(sk
);
1859 struct mr6_table
*mrt
;
1861 mrt
= ip6mr_get_table(net
, raw6_sk(sk
)->ip6mr_table
? : RT6_TABLE_DFLT
);
1866 case SIOCGETMIFCNT_IN6
:
1867 if (copy_from_user(&vr
, arg
, sizeof(vr
)))
1869 if (vr
.mifi
>= mrt
->maxvif
)
1871 read_lock(&mrt_lock
);
1872 vif
= &mrt
->vif6_table
[vr
.mifi
];
1873 if (MIF_EXISTS(mrt
, vr
.mifi
)) {
1874 vr
.icount
= vif
->pkt_in
;
1875 vr
.ocount
= vif
->pkt_out
;
1876 vr
.ibytes
= vif
->bytes_in
;
1877 vr
.obytes
= vif
->bytes_out
;
1878 read_unlock(&mrt_lock
);
1880 if (copy_to_user(arg
, &vr
, sizeof(vr
)))
1884 read_unlock(&mrt_lock
);
1885 return -EADDRNOTAVAIL
;
1886 case SIOCGETSGCNT_IN6
:
1887 if (copy_from_user(&sr
, arg
, sizeof(sr
)))
1890 read_lock(&mrt_lock
);
1891 c
= ip6mr_cache_find(mrt
, &sr
.src
.sin6_addr
, &sr
.grp
.sin6_addr
);
1893 sr
.pktcnt
= c
->mfc_un
.res
.pkt
;
1894 sr
.bytecnt
= c
->mfc_un
.res
.bytes
;
1895 sr
.wrong_if
= c
->mfc_un
.res
.wrong_if
;
1896 read_unlock(&mrt_lock
);
1898 if (copy_to_user(arg
, &sr
, sizeof(sr
)))
1902 read_unlock(&mrt_lock
);
1903 return -EADDRNOTAVAIL
;
1905 return -ENOIOCTLCMD
;
1909 #ifdef CONFIG_COMPAT
1910 struct compat_sioc_sg_req6
{
1911 struct sockaddr_in6 src
;
1912 struct sockaddr_in6 grp
;
1913 compat_ulong_t pktcnt
;
1914 compat_ulong_t bytecnt
;
1915 compat_ulong_t wrong_if
;
1918 struct compat_sioc_mif_req6
{
1920 compat_ulong_t icount
;
1921 compat_ulong_t ocount
;
1922 compat_ulong_t ibytes
;
1923 compat_ulong_t obytes
;
1926 int ip6mr_compat_ioctl(struct sock
*sk
, unsigned int cmd
, void __user
*arg
)
1928 struct compat_sioc_sg_req6 sr
;
1929 struct compat_sioc_mif_req6 vr
;
1930 struct mif_device
*vif
;
1931 struct mfc6_cache
*c
;
1932 struct net
*net
= sock_net(sk
);
1933 struct mr6_table
*mrt
;
1935 mrt
= ip6mr_get_table(net
, raw6_sk(sk
)->ip6mr_table
? : RT6_TABLE_DFLT
);
1940 case SIOCGETMIFCNT_IN6
:
1941 if (copy_from_user(&vr
, arg
, sizeof(vr
)))
1943 if (vr
.mifi
>= mrt
->maxvif
)
1945 read_lock(&mrt_lock
);
1946 vif
= &mrt
->vif6_table
[vr
.mifi
];
1947 if (MIF_EXISTS(mrt
, vr
.mifi
)) {
1948 vr
.icount
= vif
->pkt_in
;
1949 vr
.ocount
= vif
->pkt_out
;
1950 vr
.ibytes
= vif
->bytes_in
;
1951 vr
.obytes
= vif
->bytes_out
;
1952 read_unlock(&mrt_lock
);
1954 if (copy_to_user(arg
, &vr
, sizeof(vr
)))
1958 read_unlock(&mrt_lock
);
1959 return -EADDRNOTAVAIL
;
1960 case SIOCGETSGCNT_IN6
:
1961 if (copy_from_user(&sr
, arg
, sizeof(sr
)))
1964 read_lock(&mrt_lock
);
1965 c
= ip6mr_cache_find(mrt
, &sr
.src
.sin6_addr
, &sr
.grp
.sin6_addr
);
1967 sr
.pktcnt
= c
->mfc_un
.res
.pkt
;
1968 sr
.bytecnt
= c
->mfc_un
.res
.bytes
;
1969 sr
.wrong_if
= c
->mfc_un
.res
.wrong_if
;
1970 read_unlock(&mrt_lock
);
1972 if (copy_to_user(arg
, &sr
, sizeof(sr
)))
1976 read_unlock(&mrt_lock
);
1977 return -EADDRNOTAVAIL
;
1979 return -ENOIOCTLCMD
;
1984 static inline int ip6mr_forward2_finish(struct sk_buff
*skb
)
1986 IP6_INC_STATS_BH(dev_net(skb_dst(skb
)->dev
), ip6_dst_idev(skb_dst(skb
)),
1987 IPSTATS_MIB_OUTFORWDATAGRAMS
);
1988 IP6_ADD_STATS_BH(dev_net(skb_dst(skb
)->dev
), ip6_dst_idev(skb_dst(skb
)),
1989 IPSTATS_MIB_OUTOCTETS
, skb
->len
);
1990 return dst_output(skb
);
1994 * Processing handlers for ip6mr_forward
1997 static int ip6mr_forward2(struct net
*net
, struct mr6_table
*mrt
,
1998 struct sk_buff
*skb
, struct mfc6_cache
*c
, int vifi
)
2000 struct ipv6hdr
*ipv6h
;
2001 struct mif_device
*vif
= &mrt
->vif6_table
[vifi
];
2002 struct net_device
*dev
;
2003 struct dst_entry
*dst
;
2006 if (vif
->dev
== NULL
)
2009 #ifdef CONFIG_IPV6_PIMSM_V2
2010 if (vif
->flags
& MIFF_REGISTER
) {
2012 vif
->bytes_out
+= skb
->len
;
2013 vif
->dev
->stats
.tx_bytes
+= skb
->len
;
2014 vif
->dev
->stats
.tx_packets
++;
2015 ip6mr_cache_report(mrt
, skb
, vifi
, MRT6MSG_WHOLEPKT
);
2020 ipv6h
= ipv6_hdr(skb
);
2022 fl6
= (struct flowi6
) {
2023 .flowi6_oif
= vif
->link
,
2024 .daddr
= ipv6h
->daddr
,
2027 dst
= ip6_route_output(net
, NULL
, &fl6
);
2034 skb_dst_set(skb
, dst
);
2037 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
2038 * not only before forwarding, but after forwarding on all output
2039 * interfaces. It is clear, if mrouter runs a multicasting
2040 * program, it should receive packets not depending to what interface
2041 * program is joined.
2042 * If we will not make it, the program will have to join on all
2043 * interfaces. On the other hand, multihoming host (or router, but
2044 * not mrouter) cannot join to more than one interface - it will
2045 * result in receiving multiple packets.
2050 vif
->bytes_out
+= skb
->len
;
2052 /* We are about to write */
2053 /* XXX: extension headers? */
2054 if (skb_cow(skb
, sizeof(*ipv6h
) + LL_RESERVED_SPACE(dev
)))
2057 ipv6h
= ipv6_hdr(skb
);
2060 IP6CB(skb
)->flags
|= IP6SKB_FORWARDED
;
2062 return NF_HOOK(NFPROTO_IPV6
, NF_INET_FORWARD
, skb
, skb
->dev
, dev
,
2063 ip6mr_forward2_finish
);
2070 static int ip6mr_find_vif(struct mr6_table
*mrt
, struct net_device
*dev
)
2074 for (ct
= mrt
->maxvif
- 1; ct
>= 0; ct
--) {
2075 if (mrt
->vif6_table
[ct
].dev
== dev
)
2081 static int ip6_mr_forward(struct net
*net
, struct mr6_table
*mrt
,
2082 struct sk_buff
*skb
, struct mfc6_cache
*cache
)
2086 int true_vifi
= ip6mr_find_vif(mrt
, skb
->dev
);
2088 vif
= cache
->mf6c_parent
;
2089 cache
->mfc_un
.res
.pkt
++;
2090 cache
->mfc_un
.res
.bytes
+= skb
->len
;
2092 if (ipv6_addr_any(&cache
->mf6c_origin
) && true_vifi
>= 0) {
2093 struct mfc6_cache
*cache_proxy
;
2095 /* For an (*,G) entry, we only check that the incomming
2096 * interface is part of the static tree.
2098 cache_proxy
= ip6mr_cache_find_any_parent(mrt
, vif
);
2100 cache_proxy
->mfc_un
.res
.ttls
[true_vifi
] < 255)
2105 * Wrong interface: drop packet and (maybe) send PIM assert.
2107 if (mrt
->vif6_table
[vif
].dev
!= skb
->dev
) {
2108 cache
->mfc_un
.res
.wrong_if
++;
2110 if (true_vifi
>= 0 && mrt
->mroute_do_assert
&&
2111 /* pimsm uses asserts, when switching from RPT to SPT,
2112 so that we cannot check that packet arrived on an oif.
2113 It is bad, but otherwise we would need to move pretty
2114 large chunk of pimd to kernel. Ough... --ANK
2116 (mrt
->mroute_do_pim
||
2117 cache
->mfc_un
.res
.ttls
[true_vifi
] < 255) &&
2119 cache
->mfc_un
.res
.last_assert
+ MFC_ASSERT_THRESH
)) {
2120 cache
->mfc_un
.res
.last_assert
= jiffies
;
2121 ip6mr_cache_report(mrt
, skb
, true_vifi
, MRT6MSG_WRONGMIF
);
2127 mrt
->vif6_table
[vif
].pkt_in
++;
2128 mrt
->vif6_table
[vif
].bytes_in
+= skb
->len
;
2133 if (ipv6_addr_any(&cache
->mf6c_origin
) &&
2134 ipv6_addr_any(&cache
->mf6c_mcastgrp
)) {
2135 if (true_vifi
>= 0 &&
2136 true_vifi
!= cache
->mf6c_parent
&&
2137 ipv6_hdr(skb
)->hop_limit
>
2138 cache
->mfc_un
.res
.ttls
[cache
->mf6c_parent
]) {
2139 /* It's an (*,*) entry and the packet is not coming from
2140 * the upstream: forward the packet to the upstream
2143 psend
= cache
->mf6c_parent
;
2148 for (ct
= cache
->mfc_un
.res
.maxvif
- 1; ct
>= cache
->mfc_un
.res
.minvif
; ct
--) {
2149 /* For (*,G) entry, don't forward to the incoming interface */
2150 if ((!ipv6_addr_any(&cache
->mf6c_origin
) || ct
!= true_vifi
) &&
2151 ipv6_hdr(skb
)->hop_limit
> cache
->mfc_un
.res
.ttls
[ct
]) {
2153 struct sk_buff
*skb2
= skb_clone(skb
, GFP_ATOMIC
);
2155 ip6mr_forward2(net
, mrt
, skb2
, cache
, psend
);
2162 ip6mr_forward2(net
, mrt
, skb
, cache
, psend
);
2173 * Multicast packets for forwarding arrive here
2176 int ip6_mr_input(struct sk_buff
*skb
)
2178 struct mfc6_cache
*cache
;
2179 struct net
*net
= dev_net(skb
->dev
);
2180 struct mr6_table
*mrt
;
2181 struct flowi6 fl6
= {
2182 .flowi6_iif
= skb
->dev
->ifindex
,
2183 .flowi6_mark
= skb
->mark
,
2187 err
= ip6mr_fib_lookup(net
, &fl6
, &mrt
);
2193 read_lock(&mrt_lock
);
2194 cache
= ip6mr_cache_find(mrt
,
2195 &ipv6_hdr(skb
)->saddr
, &ipv6_hdr(skb
)->daddr
);
2196 if (cache
== NULL
) {
2197 int vif
= ip6mr_find_vif(mrt
, skb
->dev
);
2200 cache
= ip6mr_cache_find_any(mrt
,
2201 &ipv6_hdr(skb
)->daddr
,
2206 * No usable cache entry
2208 if (cache
== NULL
) {
2211 vif
= ip6mr_find_vif(mrt
, skb
->dev
);
2213 int err
= ip6mr_cache_unresolved(mrt
, vif
, skb
);
2214 read_unlock(&mrt_lock
);
2218 read_unlock(&mrt_lock
);
2223 ip6_mr_forward(net
, mrt
, skb
, cache
);
2225 read_unlock(&mrt_lock
);
2231 static int __ip6mr_fill_mroute(struct mr6_table
*mrt
, struct sk_buff
*skb
,
2232 struct mfc6_cache
*c
, struct rtmsg
*rtm
)
2235 struct rtnexthop
*nhp
;
2236 struct nlattr
*mp_attr
;
2237 struct rta_mfc_stats mfcs
;
2239 /* If cache is unresolved, don't try to parse IIF and OIF */
2240 if (c
->mf6c_parent
>= MAXMIFS
)
2243 if (MIF_EXISTS(mrt
, c
->mf6c_parent
) &&
2244 nla_put_u32(skb
, RTA_IIF
, mrt
->vif6_table
[c
->mf6c_parent
].dev
->ifindex
) < 0)
2246 mp_attr
= nla_nest_start(skb
, RTA_MULTIPATH
);
2247 if (mp_attr
== NULL
)
2250 for (ct
= c
->mfc_un
.res
.minvif
; ct
< c
->mfc_un
.res
.maxvif
; ct
++) {
2251 if (MIF_EXISTS(mrt
, ct
) && c
->mfc_un
.res
.ttls
[ct
] < 255) {
2252 nhp
= nla_reserve_nohdr(skb
, sizeof(*nhp
));
2254 nla_nest_cancel(skb
, mp_attr
);
2258 nhp
->rtnh_flags
= 0;
2259 nhp
->rtnh_hops
= c
->mfc_un
.res
.ttls
[ct
];
2260 nhp
->rtnh_ifindex
= mrt
->vif6_table
[ct
].dev
->ifindex
;
2261 nhp
->rtnh_len
= sizeof(*nhp
);
2265 nla_nest_end(skb
, mp_attr
);
2267 mfcs
.mfcs_packets
= c
->mfc_un
.res
.pkt
;
2268 mfcs
.mfcs_bytes
= c
->mfc_un
.res
.bytes
;
2269 mfcs
.mfcs_wrong_if
= c
->mfc_un
.res
.wrong_if
;
2270 if (nla_put(skb
, RTA_MFC_STATS
, sizeof(mfcs
), &mfcs
) < 0)
2273 rtm
->rtm_type
= RTN_MULTICAST
;
2277 int ip6mr_get_route(struct net
*net
, struct sk_buff
*skb
, struct rtmsg
*rtm
,
2278 int nowait
, u32 portid
)
2281 struct mr6_table
*mrt
;
2282 struct mfc6_cache
*cache
;
2283 struct rt6_info
*rt
= (struct rt6_info
*)skb_dst(skb
);
2285 mrt
= ip6mr_get_table(net
, RT6_TABLE_DFLT
);
2289 read_lock(&mrt_lock
);
2290 cache
= ip6mr_cache_find(mrt
, &rt
->rt6i_src
.addr
, &rt
->rt6i_dst
.addr
);
2291 if (!cache
&& skb
->dev
) {
2292 int vif
= ip6mr_find_vif(mrt
, skb
->dev
);
2295 cache
= ip6mr_cache_find_any(mrt
, &rt
->rt6i_dst
.addr
,
2300 struct sk_buff
*skb2
;
2301 struct ipv6hdr
*iph
;
2302 struct net_device
*dev
;
2306 read_unlock(&mrt_lock
);
2311 if (dev
== NULL
|| (vif
= ip6mr_find_vif(mrt
, dev
)) < 0) {
2312 read_unlock(&mrt_lock
);
2316 /* really correct? */
2317 skb2
= alloc_skb(sizeof(struct ipv6hdr
), GFP_ATOMIC
);
2319 read_unlock(&mrt_lock
);
2323 NETLINK_CB(skb2
).portid
= portid
;
2324 skb_reset_transport_header(skb2
);
2326 skb_put(skb2
, sizeof(struct ipv6hdr
));
2327 skb_reset_network_header(skb2
);
2329 iph
= ipv6_hdr(skb2
);
2332 iph
->flow_lbl
[0] = 0;
2333 iph
->flow_lbl
[1] = 0;
2334 iph
->flow_lbl
[2] = 0;
2335 iph
->payload_len
= 0;
2336 iph
->nexthdr
= IPPROTO_NONE
;
2338 iph
->saddr
= rt
->rt6i_src
.addr
;
2339 iph
->daddr
= rt
->rt6i_dst
.addr
;
2341 err
= ip6mr_cache_unresolved(mrt
, vif
, skb2
);
2342 read_unlock(&mrt_lock
);
2347 if (!nowait
&& (rtm
->rtm_flags
&RTM_F_NOTIFY
))
2348 cache
->mfc_flags
|= MFC_NOTIFY
;
2350 err
= __ip6mr_fill_mroute(mrt
, skb
, cache
, rtm
);
2351 read_unlock(&mrt_lock
);
2355 static int ip6mr_fill_mroute(struct mr6_table
*mrt
, struct sk_buff
*skb
,
2356 u32 portid
, u32 seq
, struct mfc6_cache
*c
, int cmd
,
2359 struct nlmsghdr
*nlh
;
2363 nlh
= nlmsg_put(skb
, portid
, seq
, cmd
, sizeof(*rtm
), flags
);
2367 rtm
= nlmsg_data(nlh
);
2368 rtm
->rtm_family
= RTNL_FAMILY_IP6MR
;
2369 rtm
->rtm_dst_len
= 128;
2370 rtm
->rtm_src_len
= 128;
2372 rtm
->rtm_table
= mrt
->id
;
2373 if (nla_put_u32(skb
, RTA_TABLE
, mrt
->id
))
2374 goto nla_put_failure
;
2375 rtm
->rtm_type
= RTN_MULTICAST
;
2376 rtm
->rtm_scope
= RT_SCOPE_UNIVERSE
;
2377 if (c
->mfc_flags
& MFC_STATIC
)
2378 rtm
->rtm_protocol
= RTPROT_STATIC
;
2380 rtm
->rtm_protocol
= RTPROT_MROUTED
;
2383 if (nla_put(skb
, RTA_SRC
, 16, &c
->mf6c_origin
) ||
2384 nla_put(skb
, RTA_DST
, 16, &c
->mf6c_mcastgrp
))
2385 goto nla_put_failure
;
2386 err
= __ip6mr_fill_mroute(mrt
, skb
, c
, rtm
);
2387 /* do not break the dump if cache is unresolved */
2388 if (err
< 0 && err
!= -ENOENT
)
2389 goto nla_put_failure
;
2391 return nlmsg_end(skb
, nlh
);
2394 nlmsg_cancel(skb
, nlh
);
2398 static int mr6_msgsize(bool unresolved
, int maxvif
)
2401 NLMSG_ALIGN(sizeof(struct rtmsg
))
2402 + nla_total_size(4) /* RTA_TABLE */
2403 + nla_total_size(sizeof(struct in6_addr
)) /* RTA_SRC */
2404 + nla_total_size(sizeof(struct in6_addr
)) /* RTA_DST */
2409 + nla_total_size(4) /* RTA_IIF */
2410 + nla_total_size(0) /* RTA_MULTIPATH */
2411 + maxvif
* NLA_ALIGN(sizeof(struct rtnexthop
))
2413 + nla_total_size(sizeof(struct rta_mfc_stats
))
2419 static void mr6_netlink_event(struct mr6_table
*mrt
, struct mfc6_cache
*mfc
,
2422 struct net
*net
= read_pnet(&mrt
->net
);
2423 struct sk_buff
*skb
;
2426 skb
= nlmsg_new(mr6_msgsize(mfc
->mf6c_parent
>= MAXMIFS
, mrt
->maxvif
),
2431 err
= ip6mr_fill_mroute(mrt
, skb
, 0, 0, mfc
, cmd
, 0);
2435 rtnl_notify(skb
, net
, 0, RTNLGRP_IPV6_MROUTE
, NULL
, GFP_ATOMIC
);
2441 rtnl_set_sk_err(net
, RTNLGRP_IPV6_MROUTE
, err
);
2444 static int ip6mr_rtm_dumproute(struct sk_buff
*skb
, struct netlink_callback
*cb
)
2446 struct net
*net
= sock_net(skb
->sk
);
2447 struct mr6_table
*mrt
;
2448 struct mfc6_cache
*mfc
;
2449 unsigned int t
= 0, s_t
;
2450 unsigned int h
= 0, s_h
;
2451 unsigned int e
= 0, s_e
;
2457 read_lock(&mrt_lock
);
2458 ip6mr_for_each_table(mrt
, net
) {
2463 for (h
= s_h
; h
< MFC6_LINES
; h
++) {
2464 list_for_each_entry(mfc
, &mrt
->mfc6_cache_array
[h
], list
) {
2467 if (ip6mr_fill_mroute(mrt
, skb
,
2468 NETLINK_CB(cb
->skb
).portid
,
2478 spin_lock_bh(&mfc_unres_lock
);
2479 list_for_each_entry(mfc
, &mrt
->mfc6_unres_queue
, list
) {
2482 if (ip6mr_fill_mroute(mrt
, skb
,
2483 NETLINK_CB(cb
->skb
).portid
,
2487 spin_unlock_bh(&mfc_unres_lock
);
2493 spin_unlock_bh(&mfc_unres_lock
);
2500 read_unlock(&mrt_lock
);