ipv6: ip6mr: support multiple tables
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / ipv6 / ip6mr.c
CommitLineData
7bc570c8
YH
1/*
2 * Linux IPv6 multicast routing support for BSD pim6sd
3 * Based on net/ipv4/ipmr.c.
4 *
5 * (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
6 * LSIIT Laboratory, Strasbourg, France
7 * (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
8 * 6WIND, Paris, France
9 * Copyright (C)2007,2008 USAGI/WIDE Project
10 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 */
18
19#include <asm/system.h>
20#include <asm/uaccess.h>
21#include <linux/types.h>
22#include <linux/sched.h>
23#include <linux/errno.h>
24#include <linux/timer.h>
25#include <linux/mm.h>
26#include <linux/kernel.h>
27#include <linux/fcntl.h>
28#include <linux/stat.h>
29#include <linux/socket.h>
7bc570c8
YH
30#include <linux/inet.h>
31#include <linux/netdevice.h>
32#include <linux/inetdevice.h>
7bc570c8
YH
33#include <linux/proc_fs.h>
34#include <linux/seq_file.h>
7bc570c8 35#include <linux/init.h>
5a0e3ad6 36#include <linux/slab.h>
7bc570c8
YH
37#include <net/protocol.h>
38#include <linux/skbuff.h>
39#include <net/sock.h>
7bc570c8 40#include <net/raw.h>
7bc570c8
YH
41#include <linux/notifier.h>
42#include <linux/if_arp.h>
7bc570c8
YH
43#include <net/checksum.h>
44#include <net/netlink.h>
d1db275d 45#include <net/fib_rules.h>
7bc570c8
YH
46
47#include <net/ipv6.h>
48#include <net/ip6_route.h>
49#include <linux/mroute6.h>
14fb64e1 50#include <linux/pim.h>
7bc570c8
YH
51#include <net/addrconf.h>
52#include <linux/netfilter_ipv6.h>
5d6e430d 53#include <net/ip6_checksum.h>
7bc570c8 54
6bd52143 55struct mr6_table {
d1db275d 56 struct list_head list;
6bd52143
PM
57#ifdef CONFIG_NET_NS
58 struct net *net;
59#endif
d1db275d 60 u32 id;
6bd52143
PM
61 struct sock *mroute6_sk;
62 struct timer_list ipmr_expire_timer;
63 struct list_head mfc6_unres_queue;
64 struct list_head mfc6_cache_array[MFC6_LINES];
65 struct mif_device vif6_table[MAXMIFS];
66 int maxvif;
67 atomic_t cache_resolve_queue_len;
68 int mroute_do_assert;
69 int mroute_do_pim;
70#ifdef CONFIG_IPV6_PIMSM_V2
71 int mroute_reg_vif_num;
72#endif
73};
74
d1db275d
PM
75struct ip6mr_rule {
76 struct fib_rule common;
77};
78
79struct ip6mr_result {
80 struct mr6_table *mrt;
81};
82
7bc570c8
YH
83/* Big lock, protecting vif table, mrt cache and mroute socket state.
84 Note that the changes are semaphored via rtnl_lock.
85 */
86
87static DEFINE_RWLOCK(mrt_lock);
88
89/*
90 * Multicast router control variables
91 */
92
6bd52143 93#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
7bc570c8 94
7bc570c8
YH
95/* Special spinlock for queue of unresolved entries */
96static DEFINE_SPINLOCK(mfc_unres_lock);
97
98/* We return to original Alan's scheme. Hash table of resolved
99 entries is changed only in process context and protected
100 with weak lock mrt_lock. Queue of unresolved entries is protected
101 with strong spinlock mfc_unres_lock.
102
103 In this case data path is free of exclusive locks at all.
104 */
105
106static struct kmem_cache *mrt_cachep __read_mostly;
107
d1db275d
PM
108static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
109static void ip6mr_free_table(struct mr6_table *mrt);
110
6bd52143
PM
111static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
112 struct sk_buff *skb, struct mfc6_cache *cache);
113static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
8229efda 114 mifi_t mifi, int assert);
6bd52143 115static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
b5aa30b1 116 struct mfc6_cache *c, struct rtmsg *rtm);
6bd52143 117static void mroute_clean_tables(struct mr6_table *mrt);
d1db275d
PM
118static void ipmr_expire_process(unsigned long arg);
119
120#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
/*
 * Iterate over every multicast routing table of @net.
 *
 * BUG FIX: the second macro parameter was declared "met" while the body
 * referenced "net" — the macro only worked because every call site
 * happened to name its variable "net".  Rename the parameter so the
 * macro actually uses its argument.
 */
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &(net)->ipv6.mr6_tables, list)
123
124static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
125{
126 struct mr6_table *mrt;
127
128 ip6mr_for_each_table(mrt, net) {
129 if (mrt->id == id)
130 return mrt;
131 }
132 return NULL;
133}
134
135static int ip6mr_fib_lookup(struct net *net, struct flowi *flp,
136 struct mr6_table **mrt)
137{
138 struct ip6mr_result res;
139 struct fib_lookup_arg arg = { .result = &res, };
140 int err;
141
142 err = fib_rules_lookup(net->ipv6.mr6_rules_ops, flp, 0, &arg);
143 if (err < 0)
144 return err;
145 *mrt = res.mrt;
146 return 0;
147}
148
149static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
150 int flags, struct fib_lookup_arg *arg)
151{
152 struct ip6mr_result *res = arg->result;
153 struct mr6_table *mrt;
154
155 switch (rule->action) {
156 case FR_ACT_TO_TBL:
157 break;
158 case FR_ACT_UNREACHABLE:
159 return -ENETUNREACH;
160 case FR_ACT_PROHIBIT:
161 return -EACCES;
162 case FR_ACT_BLACKHOLE:
163 default:
164 return -EINVAL;
165 }
166
167 mrt = ip6mr_get_table(rule->fr_net, rule->table);
168 if (mrt == NULL)
169 return -EAGAIN;
170 res->mrt = mrt;
171 return 0;
172}
173
/* fib_rules .match callback: ip6mr rules carry no selectors, so every
 * rule matches every flow.
 */
static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}
178
179static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
180 FRA_GENERIC_POLICY,
181};
182
/* fib_rules .configure callback: no rule-specific attributes to parse. */
static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}
188
/* fib_rules .compare callback: no private fields, rules always compare
 * equal beyond the generic part.
 */
static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}
194
195static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
196 struct fib_rule_hdr *frh)
197{
198 frh->dst_len = 0;
199 frh->src_len = 0;
200 frh->tos = 0;
201 return 0;
202}
203
204static const struct fib_rules_ops __net_initdata ip6mr_rules_ops_template = {
205 .family = RTNL_FAMILY_IP6MR,
206 .rule_size = sizeof(struct ip6mr_rule),
207 .addr_size = sizeof(struct in6_addr),
208 .action = ip6mr_rule_action,
209 .match = ip6mr_rule_match,
210 .configure = ip6mr_rule_configure,
211 .compare = ip6mr_rule_compare,
212 .default_pref = fib_default_rule_pref,
213 .fill = ip6mr_rule_fill,
214 .nlgroup = RTNLGRP_IPV6_RULE,
215 .policy = ip6mr_rule_policy,
216 .owner = THIS_MODULE,
217};
218
219static int __net_init ip6mr_rules_init(struct net *net)
220{
221 struct fib_rules_ops *ops;
222 struct mr6_table *mrt;
223 int err;
224
225 ops = fib_rules_register(&ip6mr_rules_ops_template, net);
226 if (IS_ERR(ops))
227 return PTR_ERR(ops);
228
229 INIT_LIST_HEAD(&net->ipv6.mr6_tables);
230
231 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
232 if (mrt == NULL) {
233 err = -ENOMEM;
234 goto err1;
235 }
236
237 err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
238 if (err < 0)
239 goto err2;
240
241 net->ipv6.mr6_rules_ops = ops;
242 return 0;
243
244err2:
245 kfree(mrt);
246err1:
247 fib_rules_unregister(ops);
248 return err;
249}
250
251static void __net_exit ip6mr_rules_exit(struct net *net)
252{
253 struct mr6_table *mrt, *next;
254
255 list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list)
256 ip6mr_free_table(mrt);
257 fib_rules_unregister(net->ipv6.mr6_rules_ops);
258}
259#else
260#define ip6mr_for_each_table(mrt, net) \
261 for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
262
263static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
264{
265 return net->ipv6.mrt6;
266}
267
268static int ip6mr_fib_lookup(struct net *net, struct flowi *flp,
269 struct mr6_table **mrt)
270{
271 *mrt = net->ipv6.mrt6;
272 return 0;
273}
274
275static int __net_init ip6mr_rules_init(struct net *net)
276{
277 net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
278 return net->ipv6.mrt6 ? 0 : -ENOMEM;
279}
280
281static void __net_exit ip6mr_rules_exit(struct net *net)
282{
283 ip6mr_free_table(net->ipv6.mrt6);
284}
285#endif
286
287static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
288{
289 struct mr6_table *mrt;
290 unsigned int i;
291
292 mrt = ip6mr_get_table(net, id);
293 if (mrt != NULL)
294 return mrt;
295
296 mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
297 if (mrt == NULL)
298 return NULL;
299 mrt->id = id;
300 write_pnet(&mrt->net, net);
301
302 /* Forwarding cache */
303 for (i = 0; i < MFC6_LINES; i++)
304 INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);
305
306 INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
307
308 setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
309 (unsigned long)mrt);
310
311#ifdef CONFIG_IPV6_PIMSM_V2
312 mrt->mroute_reg_vif_num = -1;
313#endif
314#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
315 list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
316#endif
317 return mrt;
318}
7bc570c8 319
d1db275d
PM
320static void ip6mr_free_table(struct mr6_table *mrt)
321{
322 del_timer(&mrt->ipmr_expire_timer);
323 mroute_clean_tables(mrt);
324 kfree(mrt);
325}
7bc570c8
YH
326
327#ifdef CONFIG_PROC_FS
328
329struct ipmr_mfc_iter {
8b90fc7e 330 struct seq_net_private p;
d1db275d 331 struct mr6_table *mrt;
f30a7784 332 struct list_head *cache;
7bc570c8
YH
333 int ct;
334};
335
336
8b90fc7e
BT
337static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
338 struct ipmr_mfc_iter *it, loff_t pos)
7bc570c8 339{
d1db275d 340 struct mr6_table *mrt = it->mrt;
7bc570c8
YH
341 struct mfc6_cache *mfc;
342
7bc570c8 343 read_lock(&mrt_lock);
f30a7784 344 for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
6bd52143 345 it->cache = &mrt->mfc6_cache_array[it->ct];
f30a7784 346 list_for_each_entry(mfc, it->cache, list)
7bc570c8
YH
347 if (pos-- == 0)
348 return mfc;
f30a7784 349 }
7bc570c8
YH
350 read_unlock(&mrt_lock);
351
7bc570c8 352 spin_lock_bh(&mfc_unres_lock);
6bd52143 353 it->cache = &mrt->mfc6_unres_queue;
f30a7784 354 list_for_each_entry(mfc, it->cache, list)
c476efbc 355 if (pos-- == 0)
7bc570c8
YH
356 return mfc;
357 spin_unlock_bh(&mfc_unres_lock);
358
359 it->cache = NULL;
360 return NULL;
361}
362
7bc570c8
YH
363/*
364 * The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
365 */
366
367struct ipmr_vif_iter {
8b90fc7e 368 struct seq_net_private p;
d1db275d 369 struct mr6_table *mrt;
7bc570c8
YH
370 int ct;
371};
372
8b90fc7e
BT
373static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
374 struct ipmr_vif_iter *iter,
7bc570c8
YH
375 loff_t pos)
376{
d1db275d 377 struct mr6_table *mrt = iter->mrt;
6bd52143
PM
378
379 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
380 if (!MIF_EXISTS(mrt, iter->ct))
7bc570c8
YH
381 continue;
382 if (pos-- == 0)
6bd52143 383 return &mrt->vif6_table[iter->ct];
7bc570c8
YH
384 }
385 return NULL;
386}
387
388static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
389 __acquires(mrt_lock)
390{
d1db275d 391 struct ipmr_vif_iter *iter = seq->private;
8b90fc7e 392 struct net *net = seq_file_net(seq);
d1db275d
PM
393 struct mr6_table *mrt;
394
395 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
396 if (mrt == NULL)
397 return ERR_PTR(-ENOENT);
398
399 iter->mrt = mrt;
8b90fc7e 400
7bc570c8 401 read_lock(&mrt_lock);
8b90fc7e
BT
402 return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
403 : SEQ_START_TOKEN;
7bc570c8
YH
404}
405
406static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
407{
408 struct ipmr_vif_iter *iter = seq->private;
8b90fc7e 409 struct net *net = seq_file_net(seq);
d1db275d 410 struct mr6_table *mrt = iter->mrt;
7bc570c8
YH
411
412 ++*pos;
413 if (v == SEQ_START_TOKEN)
8b90fc7e 414 return ip6mr_vif_seq_idx(net, iter, 0);
7bc570c8 415
6bd52143
PM
416 while (++iter->ct < mrt->maxvif) {
417 if (!MIF_EXISTS(mrt, iter->ct))
7bc570c8 418 continue;
6bd52143 419 return &mrt->vif6_table[iter->ct];
7bc570c8
YH
420 }
421 return NULL;
422}
423
424static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
425 __releases(mrt_lock)
426{
427 read_unlock(&mrt_lock);
428}
429
430static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
431{
d1db275d
PM
432 struct ipmr_vif_iter *iter = seq->private;
433 struct mr6_table *mrt = iter->mrt;
8b90fc7e 434
7bc570c8
YH
435 if (v == SEQ_START_TOKEN) {
436 seq_puts(seq,
437 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
438 } else {
439 const struct mif_device *vif = v;
440 const char *name = vif->dev ? vif->dev->name : "none";
441
442 seq_printf(seq,
d430a227 443 "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
6bd52143 444 vif - mrt->vif6_table,
7bc570c8
YH
445 name, vif->bytes_in, vif->pkt_in,
446 vif->bytes_out, vif->pkt_out,
447 vif->flags);
448 }
449 return 0;
450}
451
98147d52 452static const struct seq_operations ip6mr_vif_seq_ops = {
7bc570c8
YH
453 .start = ip6mr_vif_seq_start,
454 .next = ip6mr_vif_seq_next,
455 .stop = ip6mr_vif_seq_stop,
456 .show = ip6mr_vif_seq_show,
457};
458
459static int ip6mr_vif_open(struct inode *inode, struct file *file)
460{
8b90fc7e
BT
461 return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
462 sizeof(struct ipmr_vif_iter));
7bc570c8
YH
463}
464
5ca1b998 465static const struct file_operations ip6mr_vif_fops = {
7bc570c8
YH
466 .owner = THIS_MODULE,
467 .open = ip6mr_vif_open,
468 .read = seq_read,
469 .llseek = seq_lseek,
8b90fc7e 470 .release = seq_release_net,
7bc570c8
YH
471};
472
473static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
474{
d1db275d 475 struct ipmr_mfc_iter *it = seq->private;
8b90fc7e 476 struct net *net = seq_file_net(seq);
d1db275d 477 struct mr6_table *mrt;
8b90fc7e 478
d1db275d
PM
479 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
480 if (mrt == NULL)
481 return ERR_PTR(-ENOENT);
482
483 it->mrt = mrt;
8b90fc7e
BT
484 return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
485 : SEQ_START_TOKEN;
7bc570c8
YH
486}
487
488static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
489{
490 struct mfc6_cache *mfc = v;
491 struct ipmr_mfc_iter *it = seq->private;
8b90fc7e 492 struct net *net = seq_file_net(seq);
d1db275d 493 struct mr6_table *mrt = it->mrt;
7bc570c8
YH
494
495 ++*pos;
496
497 if (v == SEQ_START_TOKEN)
8b90fc7e 498 return ipmr_mfc_seq_idx(net, seq->private, 0);
7bc570c8 499
f30a7784
PM
500 if (mfc->list.next != it->cache)
501 return list_entry(mfc->list.next, struct mfc6_cache, list);
7bc570c8 502
6bd52143 503 if (it->cache == &mrt->mfc6_unres_queue)
7bc570c8
YH
504 goto end_of_list;
505
6bd52143 506 BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);
7bc570c8 507
4a6258a0 508 while (++it->ct < MFC6_LINES) {
6bd52143 509 it->cache = &mrt->mfc6_cache_array[it->ct];
f30a7784
PM
510 if (list_empty(it->cache))
511 continue;
512 return list_first_entry(it->cache, struct mfc6_cache, list);
7bc570c8
YH
513 }
514
515 /* exhausted cache_array, show unresolved */
516 read_unlock(&mrt_lock);
6bd52143 517 it->cache = &mrt->mfc6_unres_queue;
7bc570c8
YH
518 it->ct = 0;
519
520 spin_lock_bh(&mfc_unres_lock);
f30a7784
PM
521 if (!list_empty(it->cache))
522 return list_first_entry(it->cache, struct mfc6_cache, list);
7bc570c8
YH
523
524 end_of_list:
525 spin_unlock_bh(&mfc_unres_lock);
526 it->cache = NULL;
527
528 return NULL;
529}
530
531static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
532{
533 struct ipmr_mfc_iter *it = seq->private;
d1db275d 534 struct mr6_table *mrt = it->mrt;
7bc570c8 535
6bd52143 536 if (it->cache == &mrt->mfc6_unres_queue)
7bc570c8 537 spin_unlock_bh(&mfc_unres_lock);
6bd52143 538 else if (it->cache == mrt->mfc6_cache_array)
7bc570c8
YH
539 read_unlock(&mrt_lock);
540}
541
542static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
543{
544 int n;
545
546 if (v == SEQ_START_TOKEN) {
547 seq_puts(seq,
548 "Group "
549 "Origin "
550 "Iif Pkts Bytes Wrong Oifs\n");
551 } else {
552 const struct mfc6_cache *mfc = v;
553 const struct ipmr_mfc_iter *it = seq->private;
d1db275d 554 struct mr6_table *mrt = it->mrt;
7bc570c8 555
999890b2 556 seq_printf(seq, "%pI6 %pI6 %-3hd",
0c6ce78a 557 &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
1ea472e2 558 mfc->mf6c_parent);
7bc570c8 559
6bd52143 560 if (it->cache != &mrt->mfc6_unres_queue) {
1ea472e2
BT
561 seq_printf(seq, " %8lu %8lu %8lu",
562 mfc->mfc_un.res.pkt,
563 mfc->mfc_un.res.bytes,
564 mfc->mfc_un.res.wrong_if);
7bc570c8
YH
565 for (n = mfc->mfc_un.res.minvif;
566 n < mfc->mfc_un.res.maxvif; n++) {
6bd52143 567 if (MIF_EXISTS(mrt, n) &&
7bc570c8
YH
568 mfc->mfc_un.res.ttls[n] < 255)
569 seq_printf(seq,
570 " %2d:%-3d",
571 n, mfc->mfc_un.res.ttls[n]);
572 }
1ea472e2
BT
573 } else {
574 /* unresolved mfc_caches don't contain
575 * pkt, bytes and wrong_if values
576 */
577 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
7bc570c8
YH
578 }
579 seq_putc(seq, '\n');
580 }
581 return 0;
582}
583
88e9d34c 584static const struct seq_operations ipmr_mfc_seq_ops = {
7bc570c8
YH
585 .start = ipmr_mfc_seq_start,
586 .next = ipmr_mfc_seq_next,
587 .stop = ipmr_mfc_seq_stop,
588 .show = ipmr_mfc_seq_show,
589};
590
591static int ipmr_mfc_open(struct inode *inode, struct file *file)
592{
8b90fc7e
BT
593 return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
594 sizeof(struct ipmr_mfc_iter));
7bc570c8
YH
595}
596
5ca1b998 597static const struct file_operations ip6mr_mfc_fops = {
7bc570c8
YH
598 .owner = THIS_MODULE,
599 .open = ipmr_mfc_open,
600 .read = seq_read,
601 .llseek = seq_lseek,
8b90fc7e 602 .release = seq_release_net,
7bc570c8
YH
603};
604#endif
605
14fb64e1 606#ifdef CONFIG_IPV6_PIMSM_V2
14fb64e1
YH
607
608static int pim6_rcv(struct sk_buff *skb)
609{
610 struct pimreghdr *pim;
611 struct ipv6hdr *encap;
612 struct net_device *reg_dev = NULL;
8229efda 613 struct net *net = dev_net(skb->dev);
d1db275d
PM
614 struct mr6_table *mrt;
615 struct flowi fl = {
616 .iif = skb->dev->ifindex,
617 .mark = skb->mark,
618 };
619 int reg_vif_num;
14fb64e1
YH
620
621 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
622 goto drop;
623
624 pim = (struct pimreghdr *)skb_transport_header(skb);
625 if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
626 (pim->flags & PIM_NULL_REGISTER) ||
1d6e55f1
TG
627 (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
628 sizeof(*pim), IPPROTO_PIM,
629 csum_partial((void *)pim, sizeof(*pim), 0)) &&
ec6b486f 630 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
14fb64e1
YH
631 goto drop;
632
633 /* check if the inner packet is destined to mcast group */
634 encap = (struct ipv6hdr *)(skb_transport_header(skb) +
635 sizeof(*pim));
636
637 if (!ipv6_addr_is_multicast(&encap->daddr) ||
638 encap->payload_len == 0 ||
639 ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
640 goto drop;
641
d1db275d
PM
642 if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
643 goto drop;
644 reg_vif_num = mrt->mroute_reg_vif_num;
645
14fb64e1
YH
646 read_lock(&mrt_lock);
647 if (reg_vif_num >= 0)
6bd52143 648 reg_dev = mrt->vif6_table[reg_vif_num].dev;
14fb64e1
YH
649 if (reg_dev)
650 dev_hold(reg_dev);
651 read_unlock(&mrt_lock);
652
653 if (reg_dev == NULL)
654 goto drop;
655
656 skb->mac_header = skb->network_header;
657 skb_pull(skb, (u8 *)encap - skb->data);
658 skb_reset_network_header(skb);
659 skb->dev = reg_dev;
1d6e55f1 660 skb->protocol = htons(ETH_P_IPV6);
14fb64e1
YH
661 skb->ip_summed = 0;
662 skb->pkt_type = PACKET_HOST;
adf30907 663 skb_dst_drop(skb);
dc58c78c
PE
664 reg_dev->stats.rx_bytes += skb->len;
665 reg_dev->stats.rx_packets++;
14fb64e1
YH
666 nf_reset(skb);
667 netif_rx(skb);
668 dev_put(reg_dev);
669 return 0;
670 drop:
671 kfree_skb(skb);
672 return 0;
673}
674
41135cc8 675static const struct inet6_protocol pim6_protocol = {
14fb64e1
YH
676 .handler = pim6_rcv,
677};
678
679/* Service routines creating virtual interfaces: PIMREG */
680
6fef4c0c
SH
681static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
682 struct net_device *dev)
14fb64e1 683{
8229efda 684 struct net *net = dev_net(dev);
d1db275d
PM
685 struct mr6_table *mrt;
686 struct flowi fl = {
687 .oif = dev->ifindex,
688 .iif = skb->skb_iif,
689 .mark = skb->mark,
690 };
691 int err;
692
693 err = ip6mr_fib_lookup(net, &fl, &mrt);
694 if (err < 0)
695 return err;
8229efda 696
14fb64e1 697 read_lock(&mrt_lock);
dc58c78c
PE
698 dev->stats.tx_bytes += skb->len;
699 dev->stats.tx_packets++;
6bd52143 700 ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
14fb64e1
YH
701 read_unlock(&mrt_lock);
702 kfree_skb(skb);
6ed10654 703 return NETDEV_TX_OK;
14fb64e1
YH
704}
705
007c3838
SH
706static const struct net_device_ops reg_vif_netdev_ops = {
707 .ndo_start_xmit = reg_vif_xmit,
708};
709
14fb64e1
YH
710static void reg_vif_setup(struct net_device *dev)
711{
712 dev->type = ARPHRD_PIMREG;
713 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
714 dev->flags = IFF_NOARP;
007c3838 715 dev->netdev_ops = &reg_vif_netdev_ops;
14fb64e1 716 dev->destructor = free_netdev;
403dbb97 717 dev->features |= NETIF_F_NETNS_LOCAL;
14fb64e1
YH
718}
719
d1db275d 720static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
14fb64e1
YH
721{
722 struct net_device *dev;
d1db275d
PM
723 char name[IFNAMSIZ];
724
725 if (mrt->id == RT6_TABLE_DFLT)
726 sprintf(name, "pim6reg");
727 else
728 sprintf(name, "pim6reg%u", mrt->id);
14fb64e1 729
d1db275d 730 dev = alloc_netdev(0, name, reg_vif_setup);
14fb64e1
YH
731 if (dev == NULL)
732 return NULL;
733
8229efda
BT
734 dev_net_set(dev, net);
735
14fb64e1
YH
736 if (register_netdevice(dev)) {
737 free_netdev(dev);
738 return NULL;
739 }
740 dev->iflink = 0;
741
14fb64e1
YH
742 if (dev_open(dev))
743 goto failure;
744
7af3db78 745 dev_hold(dev);
14fb64e1
YH
746 return dev;
747
748failure:
749 /* allow the register to be completed before unregistering. */
750 rtnl_unlock();
751 rtnl_lock();
752
753 unregister_netdevice(dev);
754 return NULL;
755}
756#endif
757
7bc570c8
YH
758/*
759 * Delete a VIF entry
760 */
761
6bd52143 762static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
7bc570c8
YH
763{
764 struct mif_device *v;
765 struct net_device *dev;
1d6e55f1 766 struct inet6_dev *in6_dev;
6bd52143
PM
767
768 if (vifi < 0 || vifi >= mrt->maxvif)
7bc570c8
YH
769 return -EADDRNOTAVAIL;
770
6bd52143 771 v = &mrt->vif6_table[vifi];
7bc570c8
YH
772
773 write_lock_bh(&mrt_lock);
774 dev = v->dev;
775 v->dev = NULL;
776
777 if (!dev) {
778 write_unlock_bh(&mrt_lock);
779 return -EADDRNOTAVAIL;
780 }
781
14fb64e1 782#ifdef CONFIG_IPV6_PIMSM_V2
6bd52143
PM
783 if (vifi == mrt->mroute_reg_vif_num)
784 mrt->mroute_reg_vif_num = -1;
14fb64e1
YH
785#endif
786
6bd52143 787 if (vifi + 1 == mrt->maxvif) {
7bc570c8
YH
788 int tmp;
789 for (tmp = vifi - 1; tmp >= 0; tmp--) {
6bd52143 790 if (MIF_EXISTS(mrt, tmp))
7bc570c8
YH
791 break;
792 }
6bd52143 793 mrt->maxvif = tmp + 1;
7bc570c8
YH
794 }
795
796 write_unlock_bh(&mrt_lock);
797
798 dev_set_allmulti(dev, -1);
799
1d6e55f1
TG
800 in6_dev = __in6_dev_get(dev);
801 if (in6_dev)
802 in6_dev->cnf.mc_forwarding--;
803
7bc570c8 804 if (v->flags & MIFF_REGISTER)
c871e664 805 unregister_netdevice_queue(dev, head);
7bc570c8
YH
806
807 dev_put(dev);
808 return 0;
809}
810
58701ad4
BT
811static inline void ip6mr_cache_free(struct mfc6_cache *c)
812{
58701ad4
BT
813 kmem_cache_free(mrt_cachep, c);
814}
815
7bc570c8
YH
816/* Destroy an unresolved cache entry, killing queued skbs
817 and reporting error to netlink readers.
818 */
819
6bd52143 820static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
7bc570c8 821{
6bd52143 822 struct net *net = read_pnet(&mrt->net);
7bc570c8
YH
823 struct sk_buff *skb;
824
6bd52143 825 atomic_dec(&mrt->cache_resolve_queue_len);
7bc570c8
YH
826
827 while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
828 if (ipv6_hdr(skb)->version == 0) {
829 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
830 nlh->nlmsg_type = NLMSG_ERROR;
831 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
832 skb_trim(skb, nlh->nlmsg_len);
833 ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
8229efda 834 rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
7bc570c8
YH
835 } else
836 kfree_skb(skb);
837 }
838
58701ad4 839 ip6mr_cache_free(c);
7bc570c8
YH
840}
841
842
c476efbc 843/* Timer process for all the unresolved queue. */
7bc570c8 844
6bd52143 845static void ipmr_do_expire_process(struct mr6_table *mrt)
7bc570c8
YH
846{
847 unsigned long now = jiffies;
848 unsigned long expires = 10 * HZ;
f30a7784 849 struct mfc6_cache *c, *next;
7bc570c8 850
6bd52143 851 list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
7bc570c8
YH
852 if (time_after(c->mfc_un.unres.expires, now)) {
853 /* not yet... */
854 unsigned long interval = c->mfc_un.unres.expires - now;
855 if (interval < expires)
856 expires = interval;
7bc570c8
YH
857 continue;
858 }
859
f30a7784 860 list_del(&c->list);
6bd52143 861 ip6mr_destroy_unres(mrt, c);
7bc570c8
YH
862 }
863
6bd52143
PM
864 if (!list_empty(&mrt->mfc6_unres_queue))
865 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
7bc570c8
YH
866}
867
c476efbc 868static void ipmr_expire_process(unsigned long arg)
7bc570c8 869{
6bd52143 870 struct mr6_table *mrt = (struct mr6_table *)arg;
c476efbc 871
7bc570c8 872 if (!spin_trylock(&mfc_unres_lock)) {
6bd52143 873 mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
7bc570c8
YH
874 return;
875 }
876
6bd52143
PM
877 if (!list_empty(&mrt->mfc6_unres_queue))
878 ipmr_do_expire_process(mrt);
7bc570c8
YH
879
880 spin_unlock(&mfc_unres_lock);
881}
882
883/* Fill oifs list. It is called under write locked mrt_lock. */
884
6bd52143 885static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
b5aa30b1 886 unsigned char *ttls)
7bc570c8
YH
887{
888 int vifi;
889
6ac7eb08 890 cache->mfc_un.res.minvif = MAXMIFS;
7bc570c8 891 cache->mfc_un.res.maxvif = 0;
6ac7eb08 892 memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
7bc570c8 893
6bd52143
PM
894 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
895 if (MIF_EXISTS(mrt, vifi) &&
4e16880c 896 ttls[vifi] && ttls[vifi] < 255) {
7bc570c8
YH
897 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
898 if (cache->mfc_un.res.minvif > vifi)
899 cache->mfc_un.res.minvif = vifi;
900 if (cache->mfc_un.res.maxvif <= vifi)
901 cache->mfc_un.res.maxvif = vifi + 1;
902 }
903 }
904}
905
6bd52143
PM
906static int mif6_add(struct net *net, struct mr6_table *mrt,
907 struct mif6ctl *vifc, int mrtsock)
7bc570c8
YH
908{
909 int vifi = vifc->mif6c_mifi;
6bd52143 910 struct mif_device *v = &mrt->vif6_table[vifi];
7bc570c8 911 struct net_device *dev;
1d6e55f1 912 struct inet6_dev *in6_dev;
5ae7b444 913 int err;
7bc570c8
YH
914
915 /* Is vif busy ? */
6bd52143 916 if (MIF_EXISTS(mrt, vifi))
7bc570c8
YH
917 return -EADDRINUSE;
918
919 switch (vifc->mif6c_flags) {
14fb64e1
YH
920#ifdef CONFIG_IPV6_PIMSM_V2
921 case MIFF_REGISTER:
922 /*
923 * Special Purpose VIF in PIM
924 * All the packets will be sent to the daemon
925 */
6bd52143 926 if (mrt->mroute_reg_vif_num >= 0)
14fb64e1 927 return -EADDRINUSE;
d1db275d 928 dev = ip6mr_reg_vif(net, mrt);
14fb64e1
YH
929 if (!dev)
930 return -ENOBUFS;
5ae7b444
WC
931 err = dev_set_allmulti(dev, 1);
932 if (err) {
933 unregister_netdevice(dev);
7af3db78 934 dev_put(dev);
5ae7b444
WC
935 return err;
936 }
14fb64e1
YH
937 break;
938#endif
7bc570c8 939 case 0:
8229efda 940 dev = dev_get_by_index(net, vifc->mif6c_pifi);
7bc570c8
YH
941 if (!dev)
942 return -EADDRNOTAVAIL;
5ae7b444 943 err = dev_set_allmulti(dev, 1);
7af3db78
WC
944 if (err) {
945 dev_put(dev);
5ae7b444 946 return err;
7af3db78 947 }
7bc570c8
YH
948 break;
949 default:
950 return -EINVAL;
951 }
952
1d6e55f1
TG
953 in6_dev = __in6_dev_get(dev);
954 if (in6_dev)
955 in6_dev->cnf.mc_forwarding++;
956
7bc570c8
YH
957 /*
958 * Fill in the VIF structures
959 */
960 v->rate_limit = vifc->vifc_rate_limit;
961 v->flags = vifc->mif6c_flags;
962 if (!mrtsock)
963 v->flags |= VIFF_STATIC;
964 v->threshold = vifc->vifc_threshold;
965 v->bytes_in = 0;
966 v->bytes_out = 0;
967 v->pkt_in = 0;
968 v->pkt_out = 0;
969 v->link = dev->ifindex;
970 if (v->flags & MIFF_REGISTER)
971 v->link = dev->iflink;
972
973 /* And finish update writing critical data */
974 write_lock_bh(&mrt_lock);
7bc570c8 975 v->dev = dev;
14fb64e1
YH
976#ifdef CONFIG_IPV6_PIMSM_V2
977 if (v->flags & MIFF_REGISTER)
6bd52143 978 mrt->mroute_reg_vif_num = vifi;
14fb64e1 979#endif
6bd52143
PM
980 if (vifi + 1 > mrt->maxvif)
981 mrt->maxvif = vifi + 1;
7bc570c8
YH
982 write_unlock_bh(&mrt_lock);
983 return 0;
984}
985
6bd52143 986static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
8229efda
BT
987 struct in6_addr *origin,
988 struct in6_addr *mcastgrp)
7bc570c8
YH
989{
990 int line = MFC6_HASH(mcastgrp, origin);
991 struct mfc6_cache *c;
992
6bd52143 993 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
7bc570c8
YH
994 if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
995 ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
f30a7784 996 return c;
7bc570c8 997 }
f30a7784 998 return NULL;
7bc570c8
YH
999}
1000
1001/*
1002 * Allocate a multicast cache entry
1003 */
b5aa30b1 1004static struct mfc6_cache *ip6mr_cache_alloc(void)
7bc570c8 1005{
36cbac59 1006 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
7bc570c8
YH
1007 if (c == NULL)
1008 return NULL;
6ac7eb08 1009 c->mfc_un.res.minvif = MAXMIFS;
7bc570c8
YH
1010 return c;
1011}
1012
b5aa30b1 1013static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
7bc570c8 1014{
36cbac59 1015 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
7bc570c8
YH
1016 if (c == NULL)
1017 return NULL;
7bc570c8
YH
1018 skb_queue_head_init(&c->mfc_un.unres.unresolved);
1019 c->mfc_un.unres.expires = jiffies + 10 * HZ;
1020 return c;
1021}
1022
1023/*
1024 * A cache entry has gone into a resolved state from queued
1025 */
1026
6bd52143
PM
1027static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
1028 struct mfc6_cache *uc, struct mfc6_cache *c)
7bc570c8
YH
1029{
1030 struct sk_buff *skb;
1031
1032 /*
1033 * Play the pending entries through our router
1034 */
1035
1036 while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
1037 if (ipv6_hdr(skb)->version == 0) {
1038 int err;
1039 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
1040
6bd52143 1041 if (ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
549e028d 1042 nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
7bc570c8
YH
1043 } else {
1044 nlh->nlmsg_type = NLMSG_ERROR;
1045 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
1046 skb_trim(skb, nlh->nlmsg_len);
1047 ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
1048 }
b5aa30b1 1049 err = rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
7bc570c8 1050 } else
6bd52143 1051 ip6_mr_forward(net, mrt, skb, c);
7bc570c8
YH
1052 }
1053}
1054
1055/*
1056 * Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
1057 * expects the following bizarre scheme.
1058 *
1059 * Called under mrt_lock.
1060 */
1061
6bd52143
PM
1062static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
1063 mifi_t mifi, int assert)
7bc570c8
YH
1064{
1065 struct sk_buff *skb;
1066 struct mrt6msg *msg;
1067 int ret;
1068
14fb64e1
YH
1069#ifdef CONFIG_IPV6_PIMSM_V2
1070 if (assert == MRT6MSG_WHOLEPKT)
1071 skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1072 +sizeof(*msg));
1073 else
1074#endif
1075 skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
7bc570c8
YH
1076
1077 if (!skb)
1078 return -ENOBUFS;
1079
1080 /* I suppose that internal messages
1081 * do not require checksums */
1082
1083 skb->ip_summed = CHECKSUM_UNNECESSARY;
1084
14fb64e1
YH
1085#ifdef CONFIG_IPV6_PIMSM_V2
1086 if (assert == MRT6MSG_WHOLEPKT) {
1087 /* Ugly, but we have no choice with this interface.
1088 Duplicate old header, fix length etc.
1089 And all this only to mangle msg->im6_msgtype and
1090 to set msg->im6_mbz to "mbz" :-)
1091 */
1092 skb_push(skb, -skb_network_offset(pkt));
1093
1094 skb_push(skb, sizeof(*msg));
1095 skb_reset_transport_header(skb);
1096 msg = (struct mrt6msg *)skb_transport_header(skb);
1097 msg->im6_mbz = 0;
1098 msg->im6_msgtype = MRT6MSG_WHOLEPKT;
6bd52143 1099 msg->im6_mif = mrt->mroute_reg_vif_num;
14fb64e1
YH
1100 msg->im6_pad = 0;
1101 ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
1102 ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
1103
1104 skb->ip_summed = CHECKSUM_UNNECESSARY;
1105 } else
1106#endif
1107 {
7bc570c8
YH
1108 /*
1109 * Copy the IP header
1110 */
1111
1112 skb_put(skb, sizeof(struct ipv6hdr));
1113 skb_reset_network_header(skb);
1114 skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1115
1116 /*
1117 * Add our header
1118 */
1119 skb_put(skb, sizeof(*msg));
1120 skb_reset_transport_header(skb);
1121 msg = (struct mrt6msg *)skb_transport_header(skb);
1122
1123 msg->im6_mbz = 0;
1124 msg->im6_msgtype = assert;
6ac7eb08 1125 msg->im6_mif = mifi;
7bc570c8
YH
1126 msg->im6_pad = 0;
1127 ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
1128 ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
1129
adf30907 1130 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
7bc570c8 1131 skb->ip_summed = CHECKSUM_UNNECESSARY;
14fb64e1 1132 }
7bc570c8 1133
6bd52143 1134 if (mrt->mroute6_sk == NULL) {
7bc570c8
YH
1135 kfree_skb(skb);
1136 return -EINVAL;
1137 }
1138
1139 /*
1140 * Deliver to user space multicast routing algorithms
1141 */
6bd52143 1142 ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
bd91b8bf 1143 if (ret < 0) {
7bc570c8
YH
1144 if (net_ratelimit())
1145 printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
1146 kfree_skb(skb);
1147 }
1148
1149 return ret;
1150}
1151
1152/*
1153 * Queue a packet for resolution. It gets locked cache entry!
1154 */
1155
1156static int
6bd52143 1157ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
7bc570c8 1158{
f30a7784 1159 bool found = false;
7bc570c8
YH
1160 int err;
1161 struct mfc6_cache *c;
1162
1163 spin_lock_bh(&mfc_unres_lock);
6bd52143 1164 list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
c476efbc 1165 if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
f30a7784
PM
1166 ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1167 found = true;
7bc570c8 1168 break;
f30a7784 1169 }
7bc570c8
YH
1170 }
1171
f30a7784 1172 if (!found) {
7bc570c8
YH
1173 /*
1174 * Create a new entry if allowable
1175 */
1176
6bd52143 1177 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
b5aa30b1 1178 (c = ip6mr_cache_alloc_unres()) == NULL) {
7bc570c8
YH
1179 spin_unlock_bh(&mfc_unres_lock);
1180
1181 kfree_skb(skb);
1182 return -ENOBUFS;
1183 }
1184
1185 /*
1186 * Fill in the new cache entry
1187 */
1188 c->mf6c_parent = -1;
1189 c->mf6c_origin = ipv6_hdr(skb)->saddr;
1190 c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1191
1192 /*
1193 * Reflect first query at pim6sd
1194 */
6bd52143 1195 err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
8229efda 1196 if (err < 0) {
7bc570c8
YH
1197 /* If the report failed throw the cache entry
1198 out - Brad Parker
1199 */
1200 spin_unlock_bh(&mfc_unres_lock);
1201
58701ad4 1202 ip6mr_cache_free(c);
7bc570c8
YH
1203 kfree_skb(skb);
1204 return err;
1205 }
1206
6bd52143
PM
1207 atomic_inc(&mrt->cache_resolve_queue_len);
1208 list_add(&c->list, &mrt->mfc6_unres_queue);
7bc570c8 1209
6bd52143 1210 ipmr_do_expire_process(mrt);
7bc570c8
YH
1211 }
1212
1213 /*
1214 * See if we can append the packet
1215 */
1216 if (c->mfc_un.unres.unresolved.qlen > 3) {
1217 kfree_skb(skb);
1218 err = -ENOBUFS;
1219 } else {
1220 skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
1221 err = 0;
1222 }
1223
1224 spin_unlock_bh(&mfc_unres_lock);
1225 return err;
1226}
1227
1228/*
1229 * MFC6 cache manipulation by user space
1230 */
1231
6bd52143 1232static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc)
7bc570c8
YH
1233{
1234 int line;
f30a7784 1235 struct mfc6_cache *c, *next;
7bc570c8
YH
1236
1237 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1238
6bd52143 1239 list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
7bc570c8
YH
1240 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1241 ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
1242 write_lock_bh(&mrt_lock);
f30a7784 1243 list_del(&c->list);
7bc570c8
YH
1244 write_unlock_bh(&mrt_lock);
1245
58701ad4 1246 ip6mr_cache_free(c);
7bc570c8
YH
1247 return 0;
1248 }
1249 }
1250 return -ENOENT;
1251}
1252
1253static int ip6mr_device_event(struct notifier_block *this,
1254 unsigned long event, void *ptr)
1255{
1256 struct net_device *dev = ptr;
8229efda 1257 struct net *net = dev_net(dev);
d1db275d 1258 struct mr6_table *mrt;
7bc570c8
YH
1259 struct mif_device *v;
1260 int ct;
c871e664 1261 LIST_HEAD(list);
7bc570c8 1262
7bc570c8
YH
1263 if (event != NETDEV_UNREGISTER)
1264 return NOTIFY_DONE;
1265
d1db275d
PM
1266 ip6mr_for_each_table(mrt, net) {
1267 v = &mrt->vif6_table[0];
1268 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1269 if (v->dev == dev)
1270 mif6_delete(mrt, ct, &list);
1271 }
7bc570c8 1272 }
c871e664
ED
1273 unregister_netdevice_many(&list);
1274
7bc570c8
YH
1275 return NOTIFY_DONE;
1276}
1277
1278static struct notifier_block ip6_mr_notifier = {
1279 .notifier_call = ip6mr_device_event
1280};
1281
1282/*
1283 * Setup for IP multicast routing
1284 */
1285
4e16880c
BT
1286static int __net_init ip6mr_net_init(struct net *net)
1287{
d1db275d 1288 int err;
f30a7784 1289
d1db275d
PM
1290 err = ip6mr_rules_init(net);
1291 if (err < 0)
4e16880c 1292 goto fail;
8b90fc7e
BT
1293
1294#ifdef CONFIG_PROC_FS
1295 err = -ENOMEM;
1296 if (!proc_net_fops_create(net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
1297 goto proc_vif_fail;
1298 if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops))
1299 goto proc_cache_fail;
1300#endif
6bd52143 1301
4a6258a0
BT
1302 return 0;
1303
8b90fc7e
BT
1304#ifdef CONFIG_PROC_FS
1305proc_cache_fail:
1306 proc_net_remove(net, "ip6_mr_vif");
1307proc_vif_fail:
d1db275d 1308 ip6mr_rules_exit(net);
8b90fc7e 1309#endif
4e16880c
BT
1310fail:
1311 return err;
1312}
1313
1314static void __net_exit ip6mr_net_exit(struct net *net)
1315{
8b90fc7e
BT
1316#ifdef CONFIG_PROC_FS
1317 proc_net_remove(net, "ip6_mr_cache");
1318 proc_net_remove(net, "ip6_mr_vif");
1319#endif
d1db275d 1320 ip6mr_rules_exit(net);
4e16880c
BT
1321}
1322
1323static struct pernet_operations ip6mr_net_ops = {
1324 .init = ip6mr_net_init,
1325 .exit = ip6mr_net_exit,
1326};
1327
623d1a1a 1328int __init ip6_mr_init(void)
7bc570c8 1329{
623d1a1a
WC
1330 int err;
1331
7bc570c8
YH
1332 mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1333 sizeof(struct mfc6_cache),
1334 0, SLAB_HWCACHE_ALIGN,
1335 NULL);
1336 if (!mrt_cachep)
623d1a1a 1337 return -ENOMEM;
7bc570c8 1338
4e16880c
BT
1339 err = register_pernet_subsys(&ip6mr_net_ops);
1340 if (err)
1341 goto reg_pernet_fail;
1342
623d1a1a
WC
1343 err = register_netdevice_notifier(&ip6_mr_notifier);
1344 if (err)
1345 goto reg_notif_fail;
403dbb97
TG
1346#ifdef CONFIG_IPV6_PIMSM_V2
1347 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1348 printk(KERN_ERR "ip6_mr_init: can't add PIM protocol\n");
1349 err = -EAGAIN;
1350 goto add_proto_fail;
1351 }
1352#endif
623d1a1a 1353 return 0;
403dbb97
TG
1354#ifdef CONFIG_IPV6_PIMSM_V2
1355add_proto_fail:
1356 unregister_netdevice_notifier(&ip6_mr_notifier);
1357#endif
87b30a65 1358reg_notif_fail:
4e16880c
BT
1359 unregister_pernet_subsys(&ip6mr_net_ops);
1360reg_pernet_fail:
87b30a65 1361 kmem_cache_destroy(mrt_cachep);
623d1a1a 1362 return err;
7bc570c8
YH
1363}
1364
623d1a1a
WC
1365void ip6_mr_cleanup(void)
1366{
623d1a1a 1367 unregister_netdevice_notifier(&ip6_mr_notifier);
4e16880c 1368 unregister_pernet_subsys(&ip6mr_net_ops);
623d1a1a
WC
1369 kmem_cache_destroy(mrt_cachep);
1370}
7bc570c8 1371
6bd52143
PM
1372static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1373 struct mf6cctl *mfc, int mrtsock)
7bc570c8 1374{
f30a7784 1375 bool found = false;
7bc570c8 1376 int line;
f30a7784 1377 struct mfc6_cache *uc, *c;
6ac7eb08 1378 unsigned char ttls[MAXMIFS];
7bc570c8
YH
1379 int i;
1380
a50436f2
PM
1381 if (mfc->mf6cc_parent >= MAXMIFS)
1382 return -ENFILE;
1383
6ac7eb08
RR
1384 memset(ttls, 255, MAXMIFS);
1385 for (i = 0; i < MAXMIFS; i++) {
7bc570c8
YH
1386 if (IF_ISSET(i, &mfc->mf6cc_ifset))
1387 ttls[i] = 1;
1388
1389 }
1390
1391 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1392
6bd52143 1393 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
7bc570c8 1394 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
f30a7784
PM
1395 ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
1396 found = true;
7bc570c8 1397 break;
f30a7784 1398 }
7bc570c8
YH
1399 }
1400
f30a7784 1401 if (found) {
7bc570c8
YH
1402 write_lock_bh(&mrt_lock);
1403 c->mf6c_parent = mfc->mf6cc_parent;
6bd52143 1404 ip6mr_update_thresholds(mrt, c, ttls);
7bc570c8
YH
1405 if (!mrtsock)
1406 c->mfc_flags |= MFC_STATIC;
1407 write_unlock_bh(&mrt_lock);
1408 return 0;
1409 }
1410
1411 if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1412 return -EINVAL;
1413
b5aa30b1 1414 c = ip6mr_cache_alloc();
7bc570c8
YH
1415 if (c == NULL)
1416 return -ENOMEM;
1417
1418 c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1419 c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1420 c->mf6c_parent = mfc->mf6cc_parent;
6bd52143 1421 ip6mr_update_thresholds(mrt, c, ttls);
7bc570c8
YH
1422 if (!mrtsock)
1423 c->mfc_flags |= MFC_STATIC;
1424
1425 write_lock_bh(&mrt_lock);
6bd52143 1426 list_add(&c->list, &mrt->mfc6_cache_array[line]);
7bc570c8
YH
1427 write_unlock_bh(&mrt_lock);
1428
1429 /*
1430 * Check to see if we resolved a queued list. If so we
1431 * need to send on the frames and tidy up.
1432 */
f30a7784 1433 found = false;
7bc570c8 1434 spin_lock_bh(&mfc_unres_lock);
6bd52143 1435 list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
c476efbc 1436 if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
7bc570c8 1437 ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
f30a7784 1438 list_del(&uc->list);
6bd52143 1439 atomic_dec(&mrt->cache_resolve_queue_len);
f30a7784 1440 found = true;
7bc570c8
YH
1441 break;
1442 }
1443 }
6bd52143
PM
1444 if (list_empty(&mrt->mfc6_unres_queue))
1445 del_timer(&mrt->ipmr_expire_timer);
7bc570c8
YH
1446 spin_unlock_bh(&mfc_unres_lock);
1447
f30a7784 1448 if (found) {
6bd52143 1449 ip6mr_cache_resolve(net, mrt, uc, c);
58701ad4 1450 ip6mr_cache_free(uc);
7bc570c8
YH
1451 }
1452 return 0;
1453}
1454
1455/*
1456 * Close the multicast socket, and clear the vif tables etc
1457 */
1458
6bd52143 1459static void mroute_clean_tables(struct mr6_table *mrt)
7bc570c8
YH
1460{
1461 int i;
c871e664 1462 LIST_HEAD(list);
f30a7784 1463 struct mfc6_cache *c, *next;
7bc570c8
YH
1464
1465 /*
1466 * Shut down all active vif entries
1467 */
6bd52143
PM
1468 for (i = 0; i < mrt->maxvif; i++) {
1469 if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
1470 mif6_delete(mrt, i, &list);
7bc570c8 1471 }
c871e664 1472 unregister_netdevice_many(&list);
7bc570c8
YH
1473
1474 /*
1475 * Wipe the cache
1476 */
4a6258a0 1477 for (i = 0; i < MFC6_LINES; i++) {
6bd52143 1478 list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
f30a7784 1479 if (c->mfc_flags & MFC_STATIC)
7bc570c8 1480 continue;
7bc570c8 1481 write_lock_bh(&mrt_lock);
f30a7784 1482 list_del(&c->list);
7bc570c8
YH
1483 write_unlock_bh(&mrt_lock);
1484
58701ad4 1485 ip6mr_cache_free(c);
7bc570c8
YH
1486 }
1487 }
1488
6bd52143 1489 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
7bc570c8 1490 spin_lock_bh(&mfc_unres_lock);
6bd52143 1491 list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
f30a7784 1492 list_del(&c->list);
6bd52143 1493 ip6mr_destroy_unres(mrt, c);
7bc570c8
YH
1494 }
1495 spin_unlock_bh(&mfc_unres_lock);
1496 }
1497}
1498
6bd52143 1499static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
7bc570c8
YH
1500{
1501 int err = 0;
8229efda 1502 struct net *net = sock_net(sk);
7bc570c8
YH
1503
1504 rtnl_lock();
1505 write_lock_bh(&mrt_lock);
6bd52143
PM
1506 if (likely(mrt->mroute6_sk == NULL)) {
1507 mrt->mroute6_sk = sk;
1d6e55f1
TG
1508 net->ipv6.devconf_all->mc_forwarding++;
1509 }
7bc570c8
YH
1510 else
1511 err = -EADDRINUSE;
1512 write_unlock_bh(&mrt_lock);
1513
1514 rtnl_unlock();
1515
1516 return err;
1517}
1518
1519int ip6mr_sk_done(struct sock *sk)
1520{
d1db275d 1521 int err = -EACCES;
8229efda 1522 struct net *net = sock_net(sk);
d1db275d 1523 struct mr6_table *mrt;
7bc570c8
YH
1524
1525 rtnl_lock();
d1db275d
PM
1526 ip6mr_for_each_table(mrt, net) {
1527 if (sk == mrt->mroute6_sk) {
1528 write_lock_bh(&mrt_lock);
1529 mrt->mroute6_sk = NULL;
1530 net->ipv6.devconf_all->mc_forwarding--;
1531 write_unlock_bh(&mrt_lock);
7bc570c8 1532
d1db275d
PM
1533 mroute_clean_tables(mrt);
1534 err = 0;
1535 break;
1536 }
1537 }
7bc570c8
YH
1538 rtnl_unlock();
1539
1540 return err;
1541}
1542
d1db275d 1543struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
6bd52143 1544{
d1db275d
PM
1545 struct mr6_table *mrt;
1546 struct flowi fl = {
1547 .iif = skb->skb_iif,
1548 .oif = skb->dev->ifindex,
1549 .mark = skb->mark,
1550 };
1551
1552 if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
1553 return NULL;
6bd52143
PM
1554
1555 return mrt->mroute6_sk;
1556}
1557
7bc570c8
YH
1558/*
1559 * Socket options and virtual interface manipulation. The whole
1560 * virtual interface system is a complete heap, but unfortunately
1561 * that's how BSD mrouted happens to think. Maybe one day with a proper
1562 * MOSPF/PIM router set up we can clean this up.
1563 */
1564
b7058842 1565int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
7bc570c8
YH
1566{
1567 int ret;
1568 struct mif6ctl vif;
1569 struct mf6cctl mfc;
1570 mifi_t mifi;
8229efda 1571 struct net *net = sock_net(sk);
d1db275d
PM
1572 struct mr6_table *mrt;
1573
1574 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1575 if (mrt == NULL)
1576 return -ENOENT;
7bc570c8
YH
1577
1578 if (optname != MRT6_INIT) {
6bd52143 1579 if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN))
7bc570c8
YH
1580 return -EACCES;
1581 }
1582
1583 switch (optname) {
1584 case MRT6_INIT:
1585 if (sk->sk_type != SOCK_RAW ||
c720c7e8 1586 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
7bc570c8
YH
1587 return -EOPNOTSUPP;
1588 if (optlen < sizeof(int))
1589 return -EINVAL;
1590
6bd52143 1591 return ip6mr_sk_init(mrt, sk);
7bc570c8
YH
1592
1593 case MRT6_DONE:
1594 return ip6mr_sk_done(sk);
1595
1596 case MRT6_ADD_MIF:
1597 if (optlen < sizeof(vif))
1598 return -EINVAL;
1599 if (copy_from_user(&vif, optval, sizeof(vif)))
1600 return -EFAULT;
6ac7eb08 1601 if (vif.mif6c_mifi >= MAXMIFS)
7bc570c8
YH
1602 return -ENFILE;
1603 rtnl_lock();
6bd52143 1604 ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
7bc570c8
YH
1605 rtnl_unlock();
1606 return ret;
1607
1608 case MRT6_DEL_MIF:
1609 if (optlen < sizeof(mifi_t))
1610 return -EINVAL;
1611 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1612 return -EFAULT;
1613 rtnl_lock();
6bd52143 1614 ret = mif6_delete(mrt, mifi, NULL);
7bc570c8
YH
1615 rtnl_unlock();
1616 return ret;
1617
1618 /*
1619 * Manipulate the forwarding caches. These live
1620 * in a sort of kernel/user symbiosis.
1621 */
1622 case MRT6_ADD_MFC:
1623 case MRT6_DEL_MFC:
1624 if (optlen < sizeof(mfc))
1625 return -EINVAL;
1626 if (copy_from_user(&mfc, optval, sizeof(mfc)))
1627 return -EFAULT;
1628 rtnl_lock();
1629 if (optname == MRT6_DEL_MFC)
6bd52143 1630 ret = ip6mr_mfc_delete(mrt, &mfc);
7bc570c8 1631 else
6bd52143 1632 ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk);
7bc570c8
YH
1633 rtnl_unlock();
1634 return ret;
1635
14fb64e1
YH
1636 /*
1637 * Control PIM assert (to activate pim will activate assert)
1638 */
1639 case MRT6_ASSERT:
1640 {
1641 int v;
1642 if (get_user(v, (int __user *)optval))
1643 return -EFAULT;
6bd52143 1644 mrt->mroute_do_assert = !!v;
14fb64e1
YH
1645 return 0;
1646 }
1647
1648#ifdef CONFIG_IPV6_PIMSM_V2
1649 case MRT6_PIM:
1650 {
a9f83bf3 1651 int v;
14fb64e1
YH
1652 if (get_user(v, (int __user *)optval))
1653 return -EFAULT;
1654 v = !!v;
1655 rtnl_lock();
1656 ret = 0;
6bd52143
PM
1657 if (v != mrt->mroute_do_pim) {
1658 mrt->mroute_do_pim = v;
1659 mrt->mroute_do_assert = v;
14fb64e1
YH
1660 }
1661 rtnl_unlock();
1662 return ret;
1663 }
1664
d1db275d
PM
1665#endif
1666#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1667 case MRT6_TABLE:
1668 {
1669 u32 v;
1670
1671 if (optlen != sizeof(u32))
1672 return -EINVAL;
1673 if (get_user(v, (u32 __user *)optval))
1674 return -EFAULT;
1675 if (sk == mrt->mroute6_sk)
1676 return -EBUSY;
1677
1678 rtnl_lock();
1679 ret = 0;
1680 if (!ip6mr_new_table(net, v))
1681 ret = -ENOMEM;
1682 raw6_sk(sk)->ip6mr_table = v;
1683 rtnl_unlock();
1684 return ret;
1685 }
14fb64e1 1686#endif
7bc570c8 1687 /*
7d120c55 1688 * Spurious command, or MRT6_VERSION which you cannot
7bc570c8
YH
1689 * set.
1690 */
1691 default:
1692 return -ENOPROTOOPT;
1693 }
1694}
1695
1696/*
1697 * Getsock opt support for the multicast routing system.
1698 */
1699
1700int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1701 int __user *optlen)
1702{
1703 int olr;
1704 int val;
8229efda 1705 struct net *net = sock_net(sk);
d1db275d
PM
1706 struct mr6_table *mrt;
1707
1708 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1709 if (mrt == NULL)
1710 return -ENOENT;
7bc570c8
YH
1711
1712 switch (optname) {
1713 case MRT6_VERSION:
1714 val = 0x0305;
1715 break;
14fb64e1
YH
1716#ifdef CONFIG_IPV6_PIMSM_V2
1717 case MRT6_PIM:
6bd52143 1718 val = mrt->mroute_do_pim;
14fb64e1
YH
1719 break;
1720#endif
1721 case MRT6_ASSERT:
6bd52143 1722 val = mrt->mroute_do_assert;
14fb64e1 1723 break;
7bc570c8
YH
1724 default:
1725 return -ENOPROTOOPT;
1726 }
1727
1728 if (get_user(olr, optlen))
1729 return -EFAULT;
1730
1731 olr = min_t(int, olr, sizeof(int));
1732 if (olr < 0)
1733 return -EINVAL;
1734
1735 if (put_user(olr, optlen))
1736 return -EFAULT;
1737 if (copy_to_user(optval, &val, olr))
1738 return -EFAULT;
1739 return 0;
1740}
1741
1742/*
1743 * The IP multicast ioctl support routines.
1744 */
1745
1746int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1747{
1748 struct sioc_sg_req6 sr;
1749 struct sioc_mif_req6 vr;
1750 struct mif_device *vif;
1751 struct mfc6_cache *c;
8229efda 1752 struct net *net = sock_net(sk);
d1db275d
PM
1753 struct mr6_table *mrt;
1754
1755 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1756 if (mrt == NULL)
1757 return -ENOENT;
7bc570c8
YH
1758
1759 switch (cmd) {
1760 case SIOCGETMIFCNT_IN6:
1761 if (copy_from_user(&vr, arg, sizeof(vr)))
1762 return -EFAULT;
6bd52143 1763 if (vr.mifi >= mrt->maxvif)
7bc570c8
YH
1764 return -EINVAL;
1765 read_lock(&mrt_lock);
6bd52143
PM
1766 vif = &mrt->vif6_table[vr.mifi];
1767 if (MIF_EXISTS(mrt, vr.mifi)) {
7bc570c8
YH
1768 vr.icount = vif->pkt_in;
1769 vr.ocount = vif->pkt_out;
1770 vr.ibytes = vif->bytes_in;
1771 vr.obytes = vif->bytes_out;
1772 read_unlock(&mrt_lock);
1773
1774 if (copy_to_user(arg, &vr, sizeof(vr)))
1775 return -EFAULT;
1776 return 0;
1777 }
1778 read_unlock(&mrt_lock);
1779 return -EADDRNOTAVAIL;
1780 case SIOCGETSGCNT_IN6:
1781 if (copy_from_user(&sr, arg, sizeof(sr)))
1782 return -EFAULT;
1783
1784 read_lock(&mrt_lock);
6bd52143 1785 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
7bc570c8
YH
1786 if (c) {
1787 sr.pktcnt = c->mfc_un.res.pkt;
1788 sr.bytecnt = c->mfc_un.res.bytes;
1789 sr.wrong_if = c->mfc_un.res.wrong_if;
1790 read_unlock(&mrt_lock);
1791
1792 if (copy_to_user(arg, &sr, sizeof(sr)))
1793 return -EFAULT;
1794 return 0;
1795 }
1796 read_unlock(&mrt_lock);
1797 return -EADDRNOTAVAIL;
1798 default:
1799 return -ENOIOCTLCMD;
1800 }
1801}
1802
1803
1804static inline int ip6mr_forward2_finish(struct sk_buff *skb)
1805{
adf30907 1806 IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
483a47d2 1807 IPSTATS_MIB_OUTFORWDATAGRAMS);
7bc570c8
YH
1808 return dst_output(skb);
1809}
1810
1811/*
1812 * Processing handlers for ip6mr_forward
1813 */
1814
6bd52143
PM
1815static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
1816 struct sk_buff *skb, struct mfc6_cache *c, int vifi)
7bc570c8
YH
1817{
1818 struct ipv6hdr *ipv6h;
6bd52143 1819 struct mif_device *vif = &mrt->vif6_table[vifi];
7bc570c8
YH
1820 struct net_device *dev;
1821 struct dst_entry *dst;
1822 struct flowi fl;
1823
1824 if (vif->dev == NULL)
1825 goto out_free;
1826
14fb64e1
YH
1827#ifdef CONFIG_IPV6_PIMSM_V2
1828 if (vif->flags & MIFF_REGISTER) {
1829 vif->pkt_out++;
1830 vif->bytes_out += skb->len;
dc58c78c
PE
1831 vif->dev->stats.tx_bytes += skb->len;
1832 vif->dev->stats.tx_packets++;
6bd52143 1833 ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
8da73b73 1834 goto out_free;
14fb64e1
YH
1835 }
1836#endif
1837
7bc570c8
YH
1838 ipv6h = ipv6_hdr(skb);
1839
1840 fl = (struct flowi) {
1841 .oif = vif->link,
1842 .nl_u = { .ip6_u =
1843 { .daddr = ipv6h->daddr, }
1844 }
1845 };
1846
8229efda 1847 dst = ip6_route_output(net, NULL, &fl);
7bc570c8
YH
1848 if (!dst)
1849 goto out_free;
1850
adf30907
ED
1851 skb_dst_drop(skb);
1852 skb_dst_set(skb, dst);
7bc570c8
YH
1853
1854 /*
1855 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
1856 * not only before forwarding, but after forwarding on all output
1857 * interfaces. It is clear, if mrouter runs a multicasting
1858 * program, it should receive packets not depending to what interface
1859 * program is joined.
1860 * If we will not make it, the program will have to join on all
1861 * interfaces. On the other hand, multihoming host (or router, but
1862 * not mrouter) cannot join to more than one interface - it will
1863 * result in receiving multiple packets.
1864 */
1865 dev = vif->dev;
1866 skb->dev = dev;
1867 vif->pkt_out++;
1868 vif->bytes_out += skb->len;
1869
1870 /* We are about to write */
1871 /* XXX: extension headers? */
1872 if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
1873 goto out_free;
1874
1875 ipv6h = ipv6_hdr(skb);
1876 ipv6h->hop_limit--;
1877
1878 IP6CB(skb)->flags |= IP6SKB_FORWARDED;
1879
b2e0b385 1880 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
7bc570c8
YH
1881 ip6mr_forward2_finish);
1882
1883out_free:
1884 kfree_skb(skb);
1885 return 0;
1886}
1887
6bd52143 1888static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
7bc570c8
YH
1889{
1890 int ct;
6bd52143
PM
1891
1892 for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
1893 if (mrt->vif6_table[ct].dev == dev)
7bc570c8
YH
1894 break;
1895 }
1896 return ct;
1897}
1898
6bd52143
PM
1899static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
1900 struct sk_buff *skb, struct mfc6_cache *cache)
7bc570c8
YH
1901{
1902 int psend = -1;
1903 int vif, ct;
1904
1905 vif = cache->mf6c_parent;
1906 cache->mfc_un.res.pkt++;
1907 cache->mfc_un.res.bytes += skb->len;
1908
14fb64e1
YH
1909 /*
1910 * Wrong interface: drop packet and (maybe) send PIM assert.
1911 */
6bd52143 1912 if (mrt->vif6_table[vif].dev != skb->dev) {
14fb64e1
YH
1913 int true_vifi;
1914
1915 cache->mfc_un.res.wrong_if++;
6bd52143 1916 true_vifi = ip6mr_find_vif(mrt, skb->dev);
14fb64e1 1917
6bd52143 1918 if (true_vifi >= 0 && mrt->mroute_do_assert &&
14fb64e1
YH
1919 /* pimsm uses asserts, when switching from RPT to SPT,
1920 so that we cannot check that packet arrived on an oif.
1921 It is bad, but otherwise we would need to move pretty
1922 large chunk of pimd to kernel. Ough... --ANK
1923 */
6bd52143 1924 (mrt->mroute_do_pim ||
a21f3f99 1925 cache->mfc_un.res.ttls[true_vifi] < 255) &&
14fb64e1
YH
1926 time_after(jiffies,
1927 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1928 cache->mfc_un.res.last_assert = jiffies;
6bd52143 1929 ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
14fb64e1
YH
1930 }
1931 goto dont_forward;
1932 }
1933
6bd52143
PM
1934 mrt->vif6_table[vif].pkt_in++;
1935 mrt->vif6_table[vif].bytes_in += skb->len;
7bc570c8
YH
1936
1937 /*
1938 * Forward the frame
1939 */
1940 for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
1941 if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
1942 if (psend != -1) {
1943 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1944 if (skb2)
6bd52143 1945 ip6mr_forward2(net, mrt, skb2, cache, psend);
7bc570c8
YH
1946 }
1947 psend = ct;
1948 }
1949 }
1950 if (psend != -1) {
6bd52143 1951 ip6mr_forward2(net, mrt, skb, cache, psend);
7bc570c8
YH
1952 return 0;
1953 }
1954
14fb64e1 1955dont_forward:
7bc570c8
YH
1956 kfree_skb(skb);
1957 return 0;
1958}
1959
1960
1961/*
1962 * Multicast packets for forwarding arrive here
1963 */
1964
1965int ip6_mr_input(struct sk_buff *skb)
1966{
1967 struct mfc6_cache *cache;
8229efda 1968 struct net *net = dev_net(skb->dev);
d1db275d
PM
1969 struct mr6_table *mrt;
1970 struct flowi fl = {
1971 .iif = skb->dev->ifindex,
1972 .mark = skb->mark,
1973 };
1974 int err;
1975
1976 err = ip6mr_fib_lookup(net, &fl, &mrt);
1977 if (err < 0)
1978 return err;
7bc570c8
YH
1979
1980 read_lock(&mrt_lock);
6bd52143 1981 cache = ip6mr_cache_find(mrt,
8229efda 1982 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
7bc570c8
YH
1983
1984 /*
1985 * No usable cache entry
1986 */
1987 if (cache == NULL) {
1988 int vif;
1989
6bd52143 1990 vif = ip6mr_find_vif(mrt, skb->dev);
7bc570c8 1991 if (vif >= 0) {
6bd52143 1992 int err = ip6mr_cache_unresolved(mrt, vif, skb);
7bc570c8
YH
1993 read_unlock(&mrt_lock);
1994
1995 return err;
1996 }
1997 read_unlock(&mrt_lock);
1998 kfree_skb(skb);
1999 return -ENODEV;
2000 }
2001
6bd52143 2002 ip6_mr_forward(net, mrt, skb, cache);
7bc570c8
YH
2003
2004 read_unlock(&mrt_lock);
2005
2006 return 0;
2007}
2008
2009
2010static int
6bd52143
PM
2011ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2012 struct mfc6_cache *c, struct rtmsg *rtm)
7bc570c8
YH
2013{
2014 int ct;
2015 struct rtnexthop *nhp;
549e028d 2016 u8 *b = skb_tail_pointer(skb);
7bc570c8
YH
2017 struct rtattr *mp_head;
2018
7438189b
ND
2019 /* If cache is unresolved, don't try to parse IIF and OIF */
2020 if (c->mf6c_parent > MAXMIFS)
2021 return -ENOENT;
2022
6bd52143
PM
2023 if (MIF_EXISTS(mrt, c->mf6c_parent))
2024 RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex);
7bc570c8
YH
2025
2026 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
2027
2028 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
6bd52143 2029 if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
7bc570c8
YH
2030 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
2031 goto rtattr_failure;
2032 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
2033 nhp->rtnh_flags = 0;
2034 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
6bd52143 2035 nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
7bc570c8
YH
2036 nhp->rtnh_len = sizeof(*nhp);
2037 }
2038 }
2039 mp_head->rta_type = RTA_MULTIPATH;
549e028d 2040 mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
7bc570c8
YH
2041 rtm->rtm_type = RTN_MULTICAST;
2042 return 1;
2043
2044rtattr_failure:
2045 nlmsg_trim(skb, b);
2046 return -EMSGSIZE;
2047}
2048
8229efda
BT
2049int ip6mr_get_route(struct net *net,
2050 struct sk_buff *skb, struct rtmsg *rtm, int nowait)
7bc570c8
YH
2051{
2052 int err;
d1db275d 2053 struct mr6_table *mrt;
7bc570c8 2054 struct mfc6_cache *cache;
adf30907 2055 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
7bc570c8 2056
d1db275d
PM
2057 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2058 if (mrt == NULL)
2059 return -ENOENT;
2060
7bc570c8 2061 read_lock(&mrt_lock);
6bd52143 2062 cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
7bc570c8
YH
2063
2064 if (!cache) {
2065 struct sk_buff *skb2;
2066 struct ipv6hdr *iph;
2067 struct net_device *dev;
2068 int vif;
2069
2070 if (nowait) {
2071 read_unlock(&mrt_lock);
2072 return -EAGAIN;
2073 }
2074
2075 dev = skb->dev;
6bd52143 2076 if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
7bc570c8
YH
2077 read_unlock(&mrt_lock);
2078 return -ENODEV;
2079 }
2080
2081 /* really correct? */
2082 skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2083 if (!skb2) {
2084 read_unlock(&mrt_lock);
2085 return -ENOMEM;
2086 }
2087
2088 skb_reset_transport_header(skb2);
2089
2090 skb_put(skb2, sizeof(struct ipv6hdr));
2091 skb_reset_network_header(skb2);
2092
2093 iph = ipv6_hdr(skb2);
2094 iph->version = 0;
2095 iph->priority = 0;
2096 iph->flow_lbl[0] = 0;
2097 iph->flow_lbl[1] = 0;
2098 iph->flow_lbl[2] = 0;
2099 iph->payload_len = 0;
2100 iph->nexthdr = IPPROTO_NONE;
2101 iph->hop_limit = 0;
2102 ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
2103 ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);
2104
6bd52143 2105 err = ip6mr_cache_unresolved(mrt, vif, skb2);
7bc570c8
YH
2106 read_unlock(&mrt_lock);
2107
2108 return err;
2109 }
2110
2111 if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
2112 cache->mfc_flags |= MFC_NOTIFY;
2113
6bd52143 2114 err = ip6mr_fill_mroute(mrt, skb, cache, rtm);
7bc570c8
YH
2115 read_unlock(&mrt_lock);
2116 return err;
2117}
2118