/* IP multicast routing support for mrouted 3.6/3.8
 *
 *	(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/nexthop.h>
#include <linux/nospec.h>
struct ipmr_rule {
	struct fib_rule		common;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */
static DEFINE_RWLOCK(mrt_lock);
/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to original Alan's scheme. Hash table of resolved
 * entries is changed only in process context and protected
 * with weak lock mrt_lock. Queue of unresolved entries is protected
 * with strong spinlock mfc_unres_lock.
 *
 * In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct net_device *dev, struct sk_buff *skb,
			  struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	int err;
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi4_to_flowi(flp4));

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	arg->table = fib_rule_get_table(rule, arg);

	mrt = ipmr_get_table(rule->fr_net, arg->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}
static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}
static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};
static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}
static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}
static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}
static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};
static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	ipmr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}
static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
	rtnl_unlock();
}
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}
static int __net_init ipmr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv4.mrt = mrt;
	return 0;
}
static void __net_exit ipmr_rules_exit(struct net *net)
{
	rtnl_lock();
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;
	rtnl_unlock();
}
#endif
static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
				const void *ptr)
{
	const struct mfc_cache_cmp_arg *cmparg = arg->key;
	struct mfc_cache *c = (struct mfc_cache *)ptr;

	return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
	       cmparg->mfc_origin != c->mfc_origin;
}
static const struct rhashtable_params ipmr_rht_params = {
	.head_offset = offsetof(struct mfc_cache, mnode),
	.key_offset = offsetof(struct mfc_cache, cmparg),
	.key_len = sizeof(struct mfc_cache_cmp_arg),
	.obj_cmpfn = ipmr_hash_cmp,
	.automatic_shrinking = true,
};
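/* Note: an rhltable (not a plain rhashtable) is used for the MFC cache
 * because entries are keyed only on {origin, group}; (S,G) entries that
 * differ just in their parent/iif share one key and must be able to
 * coexist in the same bucket list.
 */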
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;
	int err;

	/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
		return ERR_PTR(-EINVAL);

	mrt = ipmr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return ERR_PTR(-ENOMEM);
	write_pnet(&mrt->net, net);
	mrt->id = id;

	err = rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
	if (err) {
		kfree(mrt);
		return ERR_PTR(err);
	}
	INIT_LIST_HEAD(&mrt->mfc_cache_list);
	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

	mrt->mroute_reg_vif_num = -1;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}
static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	rhltable_destroy(&mrt->mfc_hash);
	kfree(mrt);
}
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}
/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev)
		return false;
	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	return true;
}
static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;
			if (!ipmr_init_vif_indev(dev))
				goto failure;
			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};
static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
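/* The pimreg device is a pseudo netdevice: anything transmitted on it
 * lands in reg_vif_xmit() above, which only updates the stats and
 * bounces the packet to the daemon as an IGMPMSG_WHOLEPKT upcall; the
 * daemon then builds and sends the actual PIM REGISTER message.
 */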
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (!ipmr_init_vif_indev(dev))
		goto failure;
	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (!reg_dev)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
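/* After __pim_rcv() the outer PIM register has been stripped: the skb
 * now starts at the encapsulated IP header and is re-injected on the
 * pimreg device via skb_tunnel_rx(), so it re-enters the stack as an
 * ordinary multicast packet.
 */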
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	return NULL;
}
#endif
/**
 *	vif_delete - Delete a VIF entry
 *	@notify: Set to 1, if the caller is a notifier_call
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->rcu, ipmr_cache_free_rcu);
}
/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = nlmsg_data(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}
/* Timer process for the unresolved queue. */
static void ipmr_expire_process(unsigned long arg)
{
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + HZ / 10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10 * HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;

			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}
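/* minvif/maxvif bound the transmit loop in ip_mr_forward(): only the
 * [minvif, maxvif) window of ttls[] has to be scanned, and a ttl of 255
 * marks a vif that is not part of this entry's oif list.
 */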
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
	case VIFF_REGISTER:
		if (!ipmr_pimsm_enabled())
			return -EINVAL;
		/* Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && !__in_dev_get_rtnl(dev)) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING,
				    dev->ifindex, &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */

	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = origin
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		return c;

	return NULL;
}
/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
						    int vifi)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = htonl(INADDR_ANY),
			.mfc_origin = htonl(INADDR_ANY)
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}
/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = htonl(INADDR_ANY)
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c, *proxy;

	if (mcastgrp == htonl(INADDR_ANY))
		goto skip;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode) {
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

		/* It's ok if the vifi is part of the static tree */
		proxy = ipmr_cache_find_any_parent(mrt, c->mfc_parent);
		if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
			return c;
	}

skip:
	return ipmr_cache_find_any_parent(mrt, vifi);
}
/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
						__be32 origin, __be32 mcastgrp,
						int parent)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = origin,
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (parent == -1 || parent == c->mfc_parent)
			return c;

	return NULL;
}
/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c) {
		c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
		c->mfc_un.res.minvif = MAXVIFS;
	}
	return c;
}
static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->mfc_un.unres.unresolved);
		c->mfc_un.unres.expires = jiffies + 10 * HZ;
	}
	return c;
}
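/* An unresolved entry lives at most ~10 seconds; if the daemon has not
 * installed a route by then, ipmr_expire_process() reports RTM_DELROUTE
 * and drops the queued skbs via ipmr_destroy_unres().
 */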
/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */
	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = nlmsg_data(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
		}
	}
}
/* Bounce a cache query up to mrouted and netlink.
 *
 * Called under mrt_lock.
 */
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	const int ihl = ip_hdrlen(pkt);
	struct sock *mroute_sk;
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	int ret;

	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else {
		/* Copy the IP header */
		skb_set_network_header(skb, skb->len);
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		/* Flag to the kernel this is a route add */
		ip_hdr(skb)->protocol = 0;
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		/* Add our header */
		igmp = skb_put(skb, sizeof(struct igmphdr));
		igmp->type = assert;
		msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	igmpmsg_netlink_event(mrt, skb);

	/* Deliver to mrouted */
	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
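/* The upcall skb delivered above doubles as a struct igmpmsg: the
 * im_msgtype/im_vif fields overlay the copied IP header, which is why
 * the code patches the header in place instead of building a separate
 * control message.
 */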
/* Queue a packet for resolution. It gets locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
				 struct sk_buff *skb, struct net_device *dev)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct mfc_cache *c;
	bool found = false;
	int err;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */
		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/* Reflect first query at mrouted. */
		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			 * out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc_unres_queue);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/* See if we can append the packet */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		if (dev) {
			skb->dev = dev;
			skb->skb_iif = dev->ifindex;
		}
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
/* MFC cache manipulation by user space mroute daemon */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
	struct mfc_cache *c;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (!c)
		return -ENOENT;
	rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
	list_del_rcu(&c->list);
	mroute_netlink_event(mrt, c, RTM_DELROUTE);
	ipmr_cache_free(c);

	return 0;
}
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
{
	struct mfc_cache *uc, *c;
	bool found;
	int ret;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (c) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->mnode,
				  ipmr_rht_params);
	if (ret) {
		pr_err("ipmr: rhtable insert error %d\n", ret);
		ipmr_cache_free(c);
		return ret;
	}
	list_add_tail_rcu(&c->list, &mrt->mfc_cache_list);
	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
	struct mfc_cache *c, *tmp;
	LIST_HEAD(list);
	int i;

	/* Shut down all active vif entries */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
			continue;
		vif_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */
	list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
		if (!all && (c->mfc_flags & MFC_STATIC))
			continue;
		rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
		list_del_rcu(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_cache_free(c);
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
/* called from ip_ra_control(), before an RCU grace period,
 * we dont need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	ASSERT_RTNL();
	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt, false);
		}
	}
}
/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
			 unsigned int optlen)
{
	struct net *net = sock_net(sk);
	int val, ret = 0, parent = 0;
	struct mr_table *mrt;
	struct vifctl vif;
	struct mfcctl mfc;
	u32 uval;

	/* There's one exception to the lock - MRT_DONE which needs to unlock */
	rtnl_lock();
	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt) {
		ret = -ENOENT;
		goto out_unlock;
	}
	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			ret = -EACCES;
			goto out_unlock;
		}
	}

	switch (optname) {
	case MRT_INIT:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		if (rtnl_dereference(mrt->mroute_sk)) {
			ret = -EADDRINUSE;
			break;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
		}
		break;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk)) {
			ret = -EACCES;
		} else {
			rtnl_unlock();
			ret = ip_ra_control(sk, 0, NULL);
			goto out;
		}
		break;
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&vif, optval, sizeof(vif))) {
			ret = -EFAULT;
			break;
		}
		if (vif.vifc_vifi >= MAXVIFS) {
			ret = -ENFILE;
			break;
		}
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		break;
	/* Manipulate the forwarding caches. These live
	 * in a sort of kernel/user symbiosis.
	 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		parent = -1;
		/* fall through */
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
		if (optlen != sizeof(mfc)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&mfc, optval, sizeof(mfc))) {
			ret = -EFAULT;
			break;
		}
		if (parent == 0)
			parent = mfc.mfcc_parent;
		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk),
					   parent);
		break;
	/* Control PIM assert. */
	case MRT_ASSERT:
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}
		mrt->mroute_do_assert = val;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled()) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		val = !!val;
		if (val != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = val;
			mrt->mroute_do_assert = val;
		}
		break;
	case MRT_TABLE:
		if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(uval)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(uval, (u32 __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			mrt = ipmr_new_table(net, uval);
			if (IS_ERR(mrt))
				ret = PTR_ERR(mrt);
			else
				raw_sk(sk)->ipmr_table = uval;
		}
		break;
	/* Spurious command, or MRT_VERSION which you cannot set. */
	default:
		ret = -ENOPROTOOPT;
	}
out_unlock:
	rtnl_unlock();
out:
	return ret;
}
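/* A minimal sketch of how a routing daemon drives this interface,
 * assuming a raw IGMP socket (fd, one, vifctl and mfcctl below are
 * caller-chosen names, not kernel symbols):
 *
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vifctl, sizeof(vifctl));
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mfcctl, sizeof(mfcctl));
 */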
/* Getsock opt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT_VERSION:
		val = 0x0305;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled())
			return -ENOPROTOOPT;
		val = mrt->mroute_do_pim;
		break;
	case MRT_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;
	olr = min_t(unsigned int, olr, sizeof(int));
	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};
/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
}
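/* The tunnel header is written in place on the existing skb, so DVMRP
 * forwarding never goes through the generic ip_tunnel transmit path;
 * ipmr_queue_xmit() reserves the extra headroom before calling here.
 */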
static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(net, sk, skb);
}
/* Processing handlers for ipmr_forward */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int    encap = 0;

	if (!vif->dev)
		goto out_free;

	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;

	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow to send ICMP, so that packets will disappear
		 * to blackhole.
		 */
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(net, skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
		net, NULL, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}
static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}
/* "local" means that we should preserve one skb (for local delivery) */
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct net_device *dev, struct sk_buff *skb,
			  struct mfc_cache *cache, int local)
{
	int true_vifi = ipmr_find_vif(mrt, dev);
	int psend = -1;
	int vif, ct;

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;
	cache->mfc_un.res.lastuse = jiffies;

	if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
		struct mfc_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}

	/* Wrong interface: drop packet and (maybe) send PIM assert. */
	if (mrt->vif_table[vif].dev != dev) {
		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until routing daemons will be
			 * fixed is not to redistribute packet, if it was
			 * send through wrong interface. It means, that
			 * multicast applications WILL NOT work for
			 * (S,G), which have default multicast route pointing
			 * to wrong oif. In any case, it is not a good
			 * idea to use multicasting applications on router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		     * so that we cannot check that packet arrived on an oif.
		     * It is bad, but otherwise we would need to move pretty
		     * large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/* Forward the frame */
	if (cache->mfc_origin == htonl(INADDR_ANY) &&
	    cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
		if (true_vifi >= 0 &&
		    true_vifi != cache->mfc_parent &&
		    ip_hdr(skb)->ttl >
				cache->mfc_un.res.ttls[cache->mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = cache->mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = cache->mfc_un.res.maxvif - 1;
	     ct >= cache->mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((cache->mfc_origin != htonl(INADDR_ANY) ||
		     ct != true_vifi) &&
		    ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				if (skb2)
					ipmr_queue_xmit(net, mrt, skb2, cache,
							psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (skb2)
				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
		} else {
			ipmr_queue_xmit(net, mrt, skb, cache, psend);
			return;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
}
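/* Forwarding rule recap: a clone is transmitted for every eligible vif
 * except the last one, which gets the original skb (or a clone when a
 * local copy must survive), so the common single-oif case copies
 * nothing.
 */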
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = (rt_is_output_route(rt) ?
			       skb->dev->ifindex : 0),
		.flowi4_iif = (rt_is_output_route(rt) ?
			       LOOPBACK_IFINDEX :
			       skb->dev->ifindex),
		.flowi4_mark = skb->mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}
/* Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */
int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;
	struct net_device *dev;

	/* skb->dev passed in is the loX master dev for vrfs.
	 * As there are no vifs associated with loopback devices,
	 * get the proper interface that does have a vif associated with it.
	 */
	dev = skb->dev;
	if (netif_is_l3_master(skb->dev)) {
		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
		if (!dev) {
			kfree_skb(skb);
			return -ENODEV;
		}
	}

	/* Packet is looped back after forward, it should not be
	 * forwarded second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations sort of
			 * Cisco IOS <= 11.2(8)) do not put router alert
			 * option to IGMP packets destined to routable
			 * groups. It is very bad, because it means
			 * that we can forward NO IGMP messages.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
	if (!cache) {
		int vif = ipmr_find_vif(mrt, dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
						    vif);
	}

	/* No usable cache entry */
	if (!cache) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (!skb2)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
			read_unlock(&mrt_lock);

			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, dev, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}
#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | (PIM_TYPE_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
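/* Per the note in the file header, the register checksum is accepted
 * when it covers either the PIM header alone (RFC-conformant) or the
 * whole packet (older peers) - hence the two-step checksum test above,
 * which drops only when both variants fail.
 */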
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
{
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS) {
		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
		return -ENOENT;
	}

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD) < 0)
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, u32 portid)
{
	struct mfc_cache *cache;
	struct mr_table *mrt;
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);
	if (!cache && skb->dev) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, daddr, vif);
	}
	if (!cache) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif = -1;

		read_lock(&mrt_lock);
		dev = skb->dev;
		if (dev)
			vif = ipmr_find_vif(mrt, dev);
		if (vif < 0) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = saddr;
		iph->daddr = daddr;
		iph->version = 0;
		err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
		read_unlock(&mrt_lock);
		rcu_read_unlock();
		return err;
	}

	read_lock(&mrt_lock);
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	rcu_read_unlock();
	return err;
}
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
			    int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len  = 32;
	rtm->rtm_src_len  = 32;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
		goto nla_put_failure;
	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static size_t mroute_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(4)	/* RTA_SRC */
		+ nla_total_size(4)	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
		;

	return len;
}
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}
static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtgenmsg))
		+ nla_total_size(1)	/* IPMRA_CREPORT_MSGTYPE */
		+ nla_total_size(4)	/* IPMRA_CREPORT_VIF_ID */
		+ nla_total_size(4)	/* IPMRA_CREPORT_SRC_ADDR */
		+ nla_total_size(4)	/* IPMRA_CREPORT_DST_ADDR */
					/* IPMRA_CREPORT_PKT */
		+ nla_total_size(payloadlen)
		;

	return len;
}
static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
{
	struct net *net = read_pnet(&mrt->net);
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgenm;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	struct nlattr *nla;
	int payloadlen;

	payloadlen = pkt->len - sizeof(struct igmpmsg);
	msg = (struct igmpmsg *)skb_network_header(pkt);

	skb = nlmsg_new(igmpmsg_netlink_msgsize(payloadlen), GFP_ATOMIC);
	if (!skb)
		goto errout;

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
			sizeof(struct rtgenmsg), 0);
	if (!nlh)
		goto errout;
	rtgenm = nlmsg_data(nlh);
	rtgenm->rtgen_family = RTNL_FAMILY_IPMR;
	if (nla_put_u8(skb, IPMRA_CREPORT_MSGTYPE, msg->im_msgtype) ||
	    nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif) ||
	    nla_put_in_addr(skb, IPMRA_CREPORT_SRC_ADDR,
			    msg->im_src.s_addr) ||
	    nla_put_in_addr(skb, IPMRA_CREPORT_DST_ADDR,
			    msg->im_dst.s_addr))
		goto nla_put_failure;

	nla = nla_reserve(skb, IPMRA_CREPORT_PKT, payloadlen);
	if (!nla || skb_copy_bits(pkt, sizeof(struct igmpmsg),
				  nla_data(nla), payloadlen))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE_R, NULL, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
errout:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE_R, -ENOBUFS);
}
);
2417 static int ipmr_rtm_getroute(struct sk_buff
*in_skb
, struct nlmsghdr
*nlh
,
2418 struct netlink_ext_ack
*extack
)
2420 struct net
*net
= sock_net(in_skb
->sk
);
2421 struct nlattr
*tb
[RTA_MAX
+ 1];
2422 struct sk_buff
*skb
= NULL
;
2423 struct mfc_cache
*cache
;
2424 struct mr_table
*mrt
;
2430 err
= nlmsg_parse(nlh
, sizeof(*rtm
), tb
, RTA_MAX
,
2431 rtm_ipv4_policy
, extack
);
2435 rtm
= nlmsg_data(nlh
);
2437 src
= tb
[RTA_SRC
] ? nla_get_in_addr(tb
[RTA_SRC
]) : 0;
2438 grp
= tb
[RTA_DST
] ? nla_get_in_addr(tb
[RTA_DST
]) : 0;
2439 tableid
= tb
[RTA_TABLE
] ? nla_get_u32(tb
[RTA_TABLE
]) : 0;
2441 mrt
= ipmr_get_table(net
, tableid
? tableid
: RT_TABLE_DEFAULT
);
2447 /* entries are added/deleted only under RTNL */
2449 cache
= ipmr_cache_find(mrt
, src
, grp
);
2456 skb
= nlmsg_new(mroute_msgsize(false, mrt
->maxvif
), GFP_KERNEL
);
2462 err
= ipmr_fill_mroute(mrt
, skb
, NETLINK_CB(in_skb
).portid
,
2463 nlh
->nlmsg_seq
, cache
,
2468 err
= rtnl_unicast(skb
, net
, NETLINK_CB(in_skb
).portid
);
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_e = cb->args[1];

	rcu_read_lock();
	ipmr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
			if (e < s_e)
				goto next_entry;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0)
				goto done;
next_entry:
			e++;
		}
		e = 0;
		s_e = 0;

		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = 0;
		s_e = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[1] = e;
	cb->args[0] = t;

	return skb->len;
}
static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
	[RTA_SRC]	= { .type = NLA_U32 },
	[RTA_DST]	= { .type = NLA_U32 },
	[RTA_IIF]	= { .type = NLA_U32 },
	[RTA_TABLE]	= { .type = NLA_U32 },
	[RTA_MULTIPATH]	= { .len = sizeof(struct rtnexthop) },
};
static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
{
	switch (rtm_protocol) {
	case RTPROT_STATIC:
	case RTPROT_MROUTED:
		return true;
	}
	return false;
}
static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
{
	struct rtnexthop *rtnh = nla_data(nla);
	int remaining = nla_len(nla), vifi = 0;

	while (rtnh_ok(rtnh, remaining)) {
		mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
		if (++vifi == MAXVIFS)
			break;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	return remaining > 0 ? -EINVAL : vifi;
}
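/* The rtnh_hops field of each nexthop is reused here to carry the vif
 * TTL threshold, mirroring how __ipmr_fill_mroute() encodes ttls[] in
 * RTA_MULTIPATH on dump.
 */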
/* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
			    struct mfcctl *mfcc, int *mrtsock,
			    struct mr_table **mrtret,
			    struct netlink_ext_ack *extack)
{
	struct net_device *dev = NULL;
	u32 tblid = RT_TABLE_DEFAULT;
	struct mr_table *mrt;
	struct nlattr *attr;
	struct rtmsg *rtm;
	int ret, rem;

	ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy,
			     extack);
	if (ret < 0)
		goto out;
	rtm = nlmsg_data(nlh);

	ret = -EINVAL;
	if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
	    rtm->rtm_type != RTN_MULTICAST ||
	    rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
	    !ipmr_rtm_validate_proto(rtm->rtm_protocol))
		goto out;

	memset(mfcc, 0, sizeof(*mfcc));
	mfcc->mfcc_parent = -1;
	ret = 0;
	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
		switch (nla_type(attr)) {
		case RTA_SRC:
			mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
			break;
		case RTA_DST:
			mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
			break;
		case RTA_IIF:
			dev = __dev_get_by_index(net, nla_get_u32(attr));
			if (!dev) {
				ret = -ENODEV;
				goto out;
			}
			break;
		case RTA_MULTIPATH:
			if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
				ret = -EINVAL;
				goto out;
			}
			break;
		case RTA_TABLE:
			tblid = nla_get_u32(attr);
			break;
		}
	}
	mrt = ipmr_get_table(net, tblid);
	if (!mrt) {
		ret = -ENOENT;
		goto out;
	}
	*mrtret = mrt;
	*mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
	if (dev)
		mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);

out:
	return ret;
}
/* takes care of both newroute and delroute */
static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	int ret, mrtsock, parent;
	struct mr_table *tbl;
	struct mfcctl mfcc;

	mrtsock = 0;
	tbl = NULL;
	ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl, extack);
	if (ret < 0)
		return ret;

	parent = ret ? mfcc.mfcc_parent : -1;
	if (nlh->nlmsg_type == RTM_NEWROUTE)
		return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
	else
		return ipmr_mfc_delete(tbl, &mfcc, parent);
}
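/* Emit the per-table IPMRA_TABLE_* attributes (id, unresolved-queue length,
 * register VIF, assert/PIM mode) into an RTM_NEWLINK dump; returns false if
 * the skb ran out of room.
 */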
static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
{
	u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);

	if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
	    nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) ||
	    nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM,
			mrt->mroute_reg_vif_num) ||
	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
		       mrt->mroute_do_assert) ||
	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim))
		return false;

	return true;
}
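/* Nest one IPMRA_VIF block per in-use VIF: ifindex, id, flags, byte/packet
 * counters and the local/remote addresses. On overflow the half-built nest
 * is cancelled so the dump message stays well formed.
 */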
static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
{
	struct nlattr *vif_nest;
	struct vif_device *vif;

	/* if the VIF doesn't exist just continue */
	if (!VIF_EXISTS(mrt, vifid))
		return true;

	vif = &mrt->vif_table[vifid];
	vif_nest = nla_nest_start(skb, IPMRA_VIF);
	if (!vif_nest)
		return false;
	if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif->dev->ifindex) ||
	    nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) ||
	    nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, vif->bytes_in,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, vif->bytes_out,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, vif->pkt_in,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, vif->pkt_out,
			      IPMRA_VIFA_PAD) ||
	    nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) ||
	    nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) {
		nla_nest_cancel(skb, vif_nest);
		return false;
	}
	nla_nest_end(skb, vif_nest);

	return true;
}
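/* RTM_GETLINK dump for the IPMR family: every mr_table is reported as a
 * synthetic RTM_NEWLINK message whose IFLA_AF_SPEC nest carries the table
 * attributes and the IPMRA_TABLE_VIFS list built by the two helpers above.
 */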
static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	unsigned int t = 0, s_t;
	unsigned int e = 0, s_e;
	struct mr_table *mrt;

	s_t = cb->args[0];
	s_e = cb->args[1];

	ipmr_for_each_table(mrt, net) {
		struct nlattr *vifs, *af;
		struct ifinfomsg *hdr;
		u32 i;

		if (t < s_t)
			goto skip_table;
		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, RTM_NEWLINK,
				sizeof(*hdr), NLM_F_MULTI);
		if (!nlh)
			break;

		hdr = nlmsg_data(nlh);
		memset(hdr, 0, sizeof(*hdr));
		hdr->ifi_family = RTNL_FAMILY_IPMR;

		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}

		if (!ipmr_fill_table(mrt, skb)) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}

		vifs = nla_nest_start(skb, IPMRA_TABLE_VIFS);
		if (!vifs) {
			nla_nest_end(skb, af);
			nlmsg_end(skb, nlh);
			goto out;
		}
		for (i = 0; i < mrt->maxvif; i++) {
			if (e < s_e)
				goto skip_entry;
			if (!ipmr_fill_vif(mrt, i, skb)) {
				nla_nest_end(skb, vifs);
				nla_nest_end(skb, af);
				nlmsg_end(skb, nlh);
				goto out;
			}
skip_entry:
			e++;
		}
		s_e = 0;
		e = 0;
		nla_nest_end(skb, vifs);
		nla_nest_end(skb, af);
		nlmsg_end(skb, nlh);
skip_table:
		t++;
	}

out:
	cb->args[1] = e;
	cb->args[0] = t;

	return skb->len;
}
#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing:
 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};
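/* Position the iterator on the pos'th existing VIF, skipping holes left by
 * deleted interfaces; returns NULL once the table is exhausted.
 */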
static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
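/* seq_file start/next/stop for /proc/net/ip_mr_vif: the whole walk runs
 * under read_lock(&mrt_lock), taken in ->start and dropped in ->stop, so
 * the VIF table cannot change while it is being printed.
 */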
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}
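/* Illustrative /proc/net/ip_mr_vif body line (values are made up; the
 * layout follows the seq_printf() format above):
 *
 *  0 eth0         123456     789    654321     987 00000 0100000A 00000000
 */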
static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};
static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}
static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
};
static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	it->cache = &mrt->mfc_cache_list;
	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
		if (pos-- == 0)
			return mfc;
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
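/* The MFC walk spans two containers: the RCU-protected resolved list and
 * the spinlock-protected unresolved queue. ->start/->next acquire the lock
 * matching it->cache, and ->stop releases whichever one is still held.
 */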
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc = v;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_list)
		rcu_read_unlock();
}
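/* One line per (group, origin) cache entry. Unresolved entries print
 * zeroed counters; resolved ones also list "vif:ttl" pairs for every
 * forwarding interface with a TTL threshold below 255.
 */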
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq, " %2d:%-3d",
						   n,
						   mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};
static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}
static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#endif
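/* PIMv2 register decapsulation is handed to pim_rcv() via a raw
 * net_protocol entry for IPPROTO_PIM, registered from ip_mr_init() below.
 */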
#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler	= pim_rcv,
	.netns_ok	= 1,
};
#endif
/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}
static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip_mr_cache", net->proc_net);
	remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
	ipmr_rules_exit(net);
}
static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};
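/* Module init: create the MFC slab cache, register the pernet ops and
 * netdevice notifier, hook up PIM (if configured) and the rtnetlink
 * handlers. Each failure unwinds everything registered before it.
 */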
int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      ipmr_rtm_getroute, ipmr_rtm_dumproute, 0);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
		      ipmr_rtm_route, NULL, 0);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
		      ipmr_rtm_route, NULL, 0);
	/* table dump through RTM_GETLINK */
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK,
		      NULL, ipmr_rtm_dumplink, 0);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}