/* net/ipv6/ip6mr.c */
/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *		6WIND, Paris, France
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>

struct sock *mroute6_socket;


/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

static struct mif_device vif6_table[MAXMIFS];	/* Devices */
static int maxvif;

#define MIF_EXISTS(idx) (vif6_table[idx].dev != NULL)

static int mroute_do_assert;	/* Set in PIM assert */
#ifdef CONFIG_IPV6_PIMSM_V2
static int mroute_do_pim;
#else
#define mroute_do_pim 0
#endif

static struct mfc6_cache *mfc6_cache_array[MFC6_LINES];	/* Forwarding cache */

static struct mfc6_cache *mfc_unres_queue;	/* Queue of unresolved entries */
static atomic_t cache_resolve_queue_len;	/* Size of unresolved */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */
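
/*
 * Sketch of the resulting lock usage (an illustrative summary of the
 * scheme described above, as applied throughout this file):
 *
 *	data path:	read_lock(&mrt_lock)          - lookup mfc6_cache_array
 *	config path:	write_lock_bh(&mrt_lock)      - relink hash chains
 *	unresolved:	spin_lock_bh(&mfc_unres_lock) - queue manipulation
 */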

static struct kmem_cache *mrt_cachep __read_mostly;

static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct sk_buff *pkt, mifi_t mifi, int assert);
static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm);

#ifdef CONFIG_IPV6_PIMSM_V2
static struct inet6_protocol pim6_protocol;
#endif

static struct timer_list ipmr_expire_timer;


#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct mfc6_cache **cache;
	int ct;
};


static struct mfc6_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mfc6_cache *mfc;

	it->cache = mfc6_cache_array;
	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < ARRAY_SIZE(mfc6_cache_array); it->ct++)
		for (mfc = mfc6_cache_array[it->ct]; mfc; mfc = mfc->next)
			if (pos-- == 0)
				return mfc;
	read_unlock(&mrt_lock);

	it->cache = &mfc_unres_queue;
	spin_lock_bh(&mfc_unres_lock);
	for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}




/*
 *	The /proc interfaces to multicast routing:
 *	/proc/net/ip6_mr_cache and /proc/net/ip6_mr_vif
 */

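/*
 * Illustrative /proc/net/ip6_mr_vif output (the values are made up; the
 * column layout follows ip6mr_vif_seq_show() below):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
 *	 0 eth0           8514      21        0       0 00000
 *	 1 pim6reg           0       0        0       0 00001
 */
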
struct ipmr_vif_iter {
	int ct;
};

static struct mif_device *ip6mr_vif_seq_idx(struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
		if (!MIF_EXISTS(iter->ct))
			continue;
		if (pos-- == 0)
			return &vif6_table[iter->ct];
	}
	return NULL;
}

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	read_lock(&mrt_lock);
	return (*pos ? ip6mr_vif_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN);
}

static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(iter, 0);

	while (++iter->ct < maxvif) {
		if (!MIF_EXISTS(iter->ct))
			continue;
		return &vif6_table[iter->ct];
	}
	return NULL;
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &ip6mr_vif_seq_ops,
				sizeof(struct ipmr_vif_iter));
}

static struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ip6mr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	return (*pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN);
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(seq->private, 0);

	if (mfc->next)
		return mfc->next;

	if (it->cache == &mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != mfc6_cache_array);

	while (++it->ct < ARRAY_SIZE(mfc6_cache_array)) {
		mfc = mfc6_cache_array[it->ct];
		if (mfc)
			return mfc;
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	mfc = mfc_unres_queue;
	if (mfc)
		return mfc;

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;

	if (it->cache == &mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mfc6_cache_array)
		read_unlock(&mrt_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;

		seq_printf(seq, "%pI6 %pI6 %-3d %8ld %8ld %8ld",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent,
			   mfc->mfc_un.res.pkt,
			   mfc->mfc_un.res.bytes,
			   mfc->mfc_un.res.wrong_if);

		if (it->cache != &mfc_unres_queue) {
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &ipmr_mfc_seq_ops,
				sizeof(struct ipmr_mfc_iter));
}

static struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
#endif

#ifdef CONFIG_IPV6_PIMSM_V2
static int reg_vif_num = -1;

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IPV6);	/* decapsulated packet is IPv6 */
	skb->ip_summed = CHECKSUM_NONE;
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	reg_dev->stats.rx_bytes += skb->len;
	reg_dev->stats.rx_packets++;
	skb->dst = NULL;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}

static struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */

static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(skb, reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return 0;
}

static void reg_vif_setup(struct net_device *dev)
{
	dev->type = ARPHRD_PIMREG;
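	/* Leave room for the encapsulating IPv6 header (40 bytes) and the
	 * 8-byte PIM register header on packets tunnelled to the RP.
	 */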
	dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags = IFF_NOARP;
	dev->hard_start_xmit = reg_vif_xmit;
	dev->destructor = free_netdev;
}

static struct net_device *ip6mr_reg_vif(void)
{
	struct net_device *dev;

	dev = alloc_netdev(0, "pim6reg", reg_vif_setup);
	if (dev == NULL)
		return NULL;

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif

/*
 *	Delete a VIF entry
 */

static int mif6_delete(int vifi)
{
	struct mif_device *v;
	struct net_device *dev;
	if (vifi < 0 || vifi >= maxvif)
		return -EADDRNOTAVAIL;

	v = &vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == reg_vif_num)
		reg_vif_num = -1;
#endif

	if (vifi + 1 == maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(tmp))
				break;
		}
		maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	if (v->flags & MIFF_REGISTER)
		unregister_netdevice(dev);

	dev_put(dev);
	return 0;
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mfc6_cache *c)
{
	struct sk_buff *skb;

	atomic_dec(&cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	kmem_cache_free(mrt_cachep, c);
}


/* Single timer process for all the unresolved queue. */

static void ipmr_do_expire_process(unsigned long dummy)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, **cp;

	cp = &mfc_unres_queue;

	while ((c = *cp) != NULL) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			cp = &c->next;
			continue;
		}

		*cp = c->next;
		ip6mr_destroy_unres(c);
	}

	if (atomic_read(&cache_resolve_queue_len))
		mod_timer(&ipmr_expire_timer, jiffies + expires);
}

static void ipmr_expire_process(unsigned long dummy)
{
	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (atomic_read(&cache_resolve_queue_len))
		ipmr_do_expire_process(dummy);

	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < maxvif; vifi++) {
		if (MIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
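
/*
 * Worked example (illustrative): with ttls[1] = 1, ttls[4] = 3 and every
 * other slot left at 255, the loop above yields minvif = 1 and maxvif = 5;
 * ip6_mr_forward() below then forwards on mif 1 for packets with
 * hop_limit > 1 and on mif 4 for packets with hop_limit > 3.
 */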

static int mif6_add(struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &vif6_table[vifi];
	struct net_device *dev;
	int err;

	/* Is vif busy ? */
	if (MIF_EXISTS(vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif();
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(&init_net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		reg_vif_num = vifi;
#endif
	if (vifi + 1 > maxvif)
		maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

static struct mfc6_cache *ip6mr_cache_find(struct in6_addr *origin, struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	for (c = mfc6_cache_array[line]; c; c = c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			break;
	}
	return c;
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	memset(c, 0, sizeof(*c));
	c->mfc_un.res.minvif = MAXMIFS;
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	memset(c, 0, sizeof(*c));
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			int err;
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

			if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
			}
			err = rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
		} else
			ip6_mr_forward(skb, c);
	}
}

/*
 *	Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

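/*
 * Illustrative userspace counterpart (a sketch, not kernel code): the daemon
 * that called MRT6_INIT reads these messages from its raw ICMPv6 socket and
 * dispatches on im6_msgtype; struct mrt6msg comes from <linux/mroute6.h>,
 * while "mrt_sock" is a name assumed here for the daemon's mroute socket.
 */
#if 0
	char buf[2048];
	ssize_t len = recv(mrt_sock, buf, sizeof(buf), 0);
	struct mrt6msg *m = (struct mrt6msg *)buf;

	if (len >= (ssize_t)sizeof(*m) && m->im6_mbz == 0) {
		switch (m->im6_msgtype) {
		case MRT6MSG_NOCACHE:	/* install (S,G) via MRT6_ADD_MFC */
			break;
		case MRT6MSG_WRONGMIF:	/* run PIM assert logic */
			break;
		case MRT6MSG_WHOLEPKT:	/* register-encapsulate towards RP */
			break;
		}
	}
#endif
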
static int ip6mr_cache_report(struct sk_buff *pkt, mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+ sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = reg_vif_num;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
		/*
		 *	Copy the IP header
		 */

		skb_put(skb, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb);
		skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

		/*
		 *	Add our header
		 */
		skb_put(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);

		msg->im6_mbz = 0;
		msg->im6_msgtype = assert;
		msg->im6_mif = mifi;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->dst = dst_clone(pkt->dst);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		skb_pull(skb, sizeof(struct ipv6hdr));
	}

	if (mroute6_socket == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	if ((ret = sock_queue_rcv_skb(mroute6_socket, skb)) < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}

/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ip6mr_cache_unresolved(mifi_t mifi, struct sk_buff *skb)
{
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	for (c = mfc_unres_queue; c; c = c->next) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
			break;
	}

	if (c == NULL) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		if ((err = ip6mr_cache_report(skb, mifi, MRT6MSG_NOCACHE)) < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			kmem_cache_free(mrt_cachep, c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&cache_resolve_queue_len);
		c->next = mfc_unres_queue;
		mfc_unres_queue = c;

		ipmr_do_expire_process(1);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mf6cctl *mfc)
{
	int line;
	struct mfc6_cache *c, **cp;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct mif_device *v;
	int ct;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	v = &vif6_table[0];
	for (ct = 0; ct < maxvif; ct++, v++) {
		if (v->dev == dev)
			mif6_delete(ct);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

/*
 *	Setup for IP multicast routing
 */

int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_fops_create(&init_net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_net_fops_create(&init_net, "ip6_mr_cache",
				  0, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;
#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(&init_net, "ip6_mr_vif");
proc_vif_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	del_timer(&ipmr_expire_timer);
	kmem_cache_destroy(mrt_cachep);
	return err;
}

void ip6_mr_cleanup(void)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(&init_net, "ip6_mr_cache");
	proc_net_remove(&init_net, "ip6_mr_vif");
#endif
	unregister_netdevice_notifier(&ip6_mr_notifier);
	del_timer(&ipmr_expire_timer);
	kmem_cache_destroy(mrt_cachep);
}

static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock)
{
	int line;
	struct mfc6_cache *uc, *c, **cp;
	unsigned char ttls[MAXMIFS];
	int i;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr))
			break;
	}

	if (c != NULL) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	c->next = mfc6_cache_array[line];
	mfc6_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &mfc_unres_queue; (uc = *cp) != NULL;
	     cp = &uc->next) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			*cp = uc->next;
			if (atomic_dec_and_test(&cache_resolve_queue_len))
				del_timer(&ipmr_expire_timer);
			break;
		}
	}
	spin_unlock_bh(&mfc_unres_lock);

	if (uc) {
		ip6mr_cache_resolve(uc, c);
		kmem_cache_free(mrt_cachep, uc);
	}
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct sock *sk)
{
	int i;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < maxvif; i++) {
		if (!(vif6_table[i].flags & VIFF_STATIC))
			mif6_delete(i);
	}

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < ARRAY_SIZE(mfc6_cache_array); i++) {
		struct mfc6_cache *c, **cp;

		cp = &mfc6_cache_array[i];
		while ((c = *cp) != NULL) {
			if (c->mfc_flags & MFC_STATIC) {
				cp = &c->next;
				continue;
			}
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
		}
	}

	if (atomic_read(&cache_resolve_queue_len) != 0) {
		struct mfc6_cache *c;

		spin_lock_bh(&mfc_unres_lock);
		while (mfc_unres_queue != NULL) {
			c = mfc_unres_queue;
			mfc_unres_queue = c->next;
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_destroy_unres(c);

			spin_lock_bh(&mfc_unres_lock);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

static int ip6mr_sk_init(struct sock *sk)
{
	int err = 0;

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mroute6_socket == NULL))
		mroute6_socket = sk;
	else
		err = -EADDRINUSE;
	write_unlock_bh(&mrt_lock);

	rtnl_unlock();

	return err;
}

int ip6mr_sk_done(struct sock *sk)
{
	int err = 0;

	rtnl_lock();
	if (sk == mroute6_socket) {
		write_lock_bh(&mrt_lock);
		mroute6_socket = NULL;
		write_unlock_bh(&mrt_lock);

		mroute_clean_tables(sk);
	} else
		err = -EACCES;
	rtnl_unlock();

	return err;
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
{
	int ret;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;

	if (optname != MRT6_INIT) {
		if (sk != mroute6_socket && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(&vif, sk == mroute6_socket);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mifi);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC)
			ret = ip6mr_mfc_delete(&mfc);
		else
			ret = ip6mr_mfc_add(&mfc, sk == mroute6_socket);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mroute_do_assert = !!v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		if (v != mroute_do_pim) {
			mroute_do_pim = v;
			mroute_do_assert = v;
			if (mroute_do_pim)
				ret = inet6_add_protocol(&pim6_protocol,
							 IPPROTO_PIM);
			else
				ret = inet6_del_protocol(&pim6_protocol,
							 IPPROTO_PIM);
			if (ret < 0)
				ret = -EAGAIN;
		}
		rtnl_unlock();
		return ret;
	}

#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
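
/*
 * Illustrative userspace usage (a sketch, error handling omitted): the
 * option level is IPPROTO_IPV6 and, as the MRT6_INIT case above enforces,
 * the socket must be a raw ICMPv6 socket. "eth0" is an assumed interface.
 */
#if 0
	int one = 1;
	struct mif6ctl mc = {
		.mif6c_mifi = 0,
		.mif6c_pifi = if_nametoindex("eth0"),
	};
	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);

	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
#endif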

/*
 *	Getsockopt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &vif6_table[vr.mifi];
		if (MIF_EXISTS(vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(&sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
1383
1384static inline int ip6mr_forward2_finish(struct sk_buff *skb)
1385{
483a47d2
DL
1386 IP6_INC_STATS_BH(dev_net(skb->dst->dev), ip6_dst_idev(skb->dst),
1387 IPSTATS_MIB_OUTFORWDATAGRAMS);
7bc570c8
YH
1388 return dst_output(skb);
1389}
1390
1391/*
1392 * Processing handlers for ip6mr_forward
1393 */
1394
1395static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
1396{
1397 struct ipv6hdr *ipv6h;
1398 struct mif_device *vif = &vif6_table[vifi];
1399 struct net_device *dev;
1400 struct dst_entry *dst;
1401 struct flowi fl;
1402
1403 if (vif->dev == NULL)
1404 goto out_free;
1405
14fb64e1
YH
1406#ifdef CONFIG_IPV6_PIMSM_V2
1407 if (vif->flags & MIFF_REGISTER) {
1408 vif->pkt_out++;
1409 vif->bytes_out += skb->len;
dc58c78c
PE
1410 vif->dev->stats.tx_bytes += skb->len;
1411 vif->dev->stats.tx_packets++;
14fb64e1
YH
1412 ip6mr_cache_report(skb, vifi, MRT6MSG_WHOLEPKT);
1413 kfree_skb(skb);
1414 return 0;
1415 }
1416#endif
1417
7bc570c8
YH
1418 ipv6h = ipv6_hdr(skb);
1419
1420 fl = (struct flowi) {
1421 .oif = vif->link,
1422 .nl_u = { .ip6_u =
1423 { .daddr = ipv6h->daddr, }
1424 }
1425 };
1426
1427 dst = ip6_route_output(&init_net, NULL, &fl);
1428 if (!dst)
1429 goto out_free;
1430
1431 dst_release(skb->dst);
1432 skb->dst = dst;
1433
1434 /*
1435 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
1436 * not only before forwarding, but after forwarding on all output
1437 * interfaces. It is clear, if mrouter runs a multicasting
1438 * program, it should receive packets not depending to what interface
1439 * program is joined.
1440 * If we will not make it, the program will have to join on all
1441 * interfaces. On the other hand, multihoming host (or router, but
1442 * not mrouter) cannot join to more than one interface - it will
1443 * result in receiving multiple packets.
1444 */
1445 dev = vif->dev;
1446 skb->dev = dev;
1447 vif->pkt_out++;
1448 vif->bytes_out += skb->len;
1449
1450 /* We are about to write */
1451 /* XXX: extension headers? */
1452 if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
1453 goto out_free;
1454
1455 ipv6h = ipv6_hdr(skb);
1456 ipv6h->hop_limit--;
1457
1458 IP6CB(skb)->flags |= IP6SKB_FORWARDED;
1459
1460 return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dev,
1461 ip6mr_forward2_finish);
1462
1463out_free:
1464 kfree_skb(skb);
1465 return 0;
1466}
1467
1468static int ip6mr_find_vif(struct net_device *dev)
1469{
1470 int ct;
1471 for (ct = maxvif - 1; ct >= 0; ct--) {
1472 if (vif6_table[ct].dev == dev)
1473 break;
1474 }
1475 return ct;
1476}
1477
1478static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
1479{
1480 int psend = -1;
1481 int vif, ct;
1482
1483 vif = cache->mf6c_parent;
1484 cache->mfc_un.res.pkt++;
1485 cache->mfc_un.res.bytes += skb->len;
1486
14fb64e1
YH
1487 /*
1488 * Wrong interface: drop packet and (maybe) send PIM assert.
1489 */
1490 if (vif6_table[vif].dev != skb->dev) {
1491 int true_vifi;
1492
1493 cache->mfc_un.res.wrong_if++;
1494 true_vifi = ip6mr_find_vif(skb->dev);
1495
1496 if (true_vifi >= 0 && mroute_do_assert &&
1497 /* pimsm uses asserts, when switching from RPT to SPT,
1498 so that we cannot check that packet arrived on an oif.
1499 It is bad, but otherwise we would need to move pretty
1500 large chunk of pimd to kernel. Ough... --ANK
1501 */
1502 (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
1503 time_after(jiffies,
1504 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1505 cache->mfc_un.res.last_assert = jiffies;
1506 ip6mr_cache_report(skb, true_vifi, MRT6MSG_WRONGMIF);
1507 }
1508 goto dont_forward;
1509 }
1510
7bc570c8
YH
1511 vif6_table[vif].pkt_in++;
1512 vif6_table[vif].bytes_in += skb->len;
1513
1514 /*
1515 * Forward the frame
1516 */
1517 for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
1518 if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
1519 if (psend != -1) {
1520 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1521 if (skb2)
1522 ip6mr_forward2(skb2, cache, psend);
1523 }
1524 psend = ct;
1525 }
1526 }
1527 if (psend != -1) {
1528 ip6mr_forward2(skb, cache, psend);
1529 return 0;
1530 }
1531
14fb64e1 1532dont_forward:
7bc570c8
YH
1533 kfree_skb(skb);
1534 return 0;
1535}
1536
1537
1538/*
1539 * Multicast packets for forwarding arrive here
1540 */
1541
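/* (A sketch of the call path, for orientation: this is reached from
 * ip6_mc_input() in net/ipv6/ip6_input.c when the kernel is built with
 * CONFIG_IPV6_MROUTE.)
 */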
int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		vif = ip6mr_find_vif(skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}


static int
ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct net_device *dev = vif6_table[c->mf6c_parent].dev;
	u8 *b = skb_tail_pointer(skb);
	struct rtattr *mp_head;

	if (dev)
		RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);

	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = vif6_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
	return 1;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

int ip6mr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb->dst;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(&rt->rt6i_src.addr, &rt->rt6i_dst.addr);

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
		ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);

		err = ip6mr_cache_unresolved(vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;

	err = ip6mr_fill_mroute(skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}