net/ipv6/ip6mr.c — IPv6 multicast routing (git-blame export; annotation artifacts interleaved below)
1/*
2 * Linux IPv6 multicast routing support for BSD pim6sd
3 * Based on net/ipv4/ipmr.c.
4 *
5 * (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
6 * LSIIT Laboratory, Strasbourg, France
7 * (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
8 * 6WIND, Paris, France
9 * Copyright (C)2007,2008 USAGI/WIDE Project
10 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 */
18
19#include <asm/system.h>
20#include <asm/uaccess.h>
21#include <linux/types.h>
22#include <linux/sched.h>
23#include <linux/errno.h>
24#include <linux/timer.h>
25#include <linux/mm.h>
26#include <linux/kernel.h>
27#include <linux/fcntl.h>
28#include <linux/stat.h>
29#include <linux/socket.h>
7bc570c8
YH
30#include <linux/inet.h>
31#include <linux/netdevice.h>
32#include <linux/inetdevice.h>
7bc570c8
YH
33#include <linux/proc_fs.h>
34#include <linux/seq_file.h>
7bc570c8 35#include <linux/init.h>
5a0e3ad6 36#include <linux/slab.h>
7bc570c8
YH
37#include <net/protocol.h>
38#include <linux/skbuff.h>
39#include <net/sock.h>
7bc570c8 40#include <net/raw.h>
7bc570c8
YH
41#include <linux/notifier.h>
42#include <linux/if_arp.h>
7bc570c8
YH
43#include <net/checksum.h>
44#include <net/netlink.h>
d1db275d 45#include <net/fib_rules.h>
7bc570c8
YH
46
47#include <net/ipv6.h>
48#include <net/ip6_route.h>
49#include <linux/mroute6.h>
14fb64e1 50#include <linux/pim.h>
7bc570c8
YH
51#include <net/addrconf.h>
52#include <linux/netfilter_ipv6.h>
5d6e430d 53#include <net/ip6_checksum.h>
7bc570c8 54
/*
 * Per-namespace multicast routing table.  With
 * CONFIG_IPV6_MROUTE_MULTIPLE_TABLES several instances may exist per
 * namespace, selected through fib rules; otherwise there is exactly one.
 */
struct mr6_table {
	struct list_head	list;			/* linked on net->ipv6.mr6_tables */
#ifdef CONFIG_NET_NS
	struct net		*net;			/* owning network namespace */
#endif
	u32			id;			/* table id (RT6_TABLE_*) */
	struct sock		*mroute6_sk;		/* socket of the routing daemon, NULL if none */
	struct timer_list	ipmr_expire_timer;	/* ages out unresolved entries */
	struct list_head	mfc6_unres_queue;	/* queue of not-yet-resolved cache entries */
	struct list_head	mfc6_cache_array[MFC6_LINES];	/* hash of resolved entries */
	struct mif_device	vif6_table[MAXMIFS];	/* multicast virtual interfaces */
	int			maxvif;			/* highest used vif index + 1 */
	atomic_t		cache_resolve_queue_len; /* # entries on mfc6_unres_queue */
	int			mroute_do_assert;	/* deliver wrong-iif notifications */
	int			mroute_do_pim;		/* PIM assert mode enabled */
#ifdef CONFIG_IPV6_PIMSM_V2
	int			mroute_reg_vif_num;	/* vif index of the pim6reg device, -1 if none */
#endif
};
74
d1db275d
PM
/* fib_rule wrapper used when multiple ip6mr tables are configured. */
struct ip6mr_rule {
	struct fib_rule		common;
};

/* Result slot filled by the fib-rules walk in ip6mr_fib_lookup(). */
struct ip6mr_result {
	struct mr6_table	*mrt;
};
82
7bc570c8
YH
83/* Big lock, protecting vif table, mrt cache and mroute socket state.
84 Note that the changes are semaphored via rtnl_lock.
85 */
86
87static DEFINE_RWLOCK(mrt_lock);
88
89/*
90 * Multicast router control variables
91 */
92
6bd52143 93#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
7bc570c8 94
7bc570c8
YH
95/* Special spinlock for queue of unresolved entries */
96static DEFINE_SPINLOCK(mfc_unres_lock);
97
98/* We return to original Alan's scheme. Hash table of resolved
99 entries is changed only in process context and protected
100 with weak lock mrt_lock. Queue of unresolved entries is protected
101 with strong spinlock mfc_unres_lock.
102
103 In this case data path is free of exclusive locks at all.
104 */
105
106static struct kmem_cache *mrt_cachep __read_mostly;
107
d1db275d
PM
108static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
109static void ip6mr_free_table(struct mr6_table *mrt);
110
6bd52143
PM
111static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
112 struct sk_buff *skb, struct mfc6_cache *cache);
113static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
8229efda 114 mifi_t mifi, int assert);
5b285cac
PM
115static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
116 struct mfc6_cache *c, struct rtmsg *rtm);
117static int ip6mr_rtm_dumproute(struct sk_buff *skb,
118 struct netlink_callback *cb);
6bd52143 119static void mroute_clean_tables(struct mr6_table *mrt);
d1db275d
PM
120static void ipmr_expire_process(unsigned long arg);
121
122#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
8ffb335e 123#define ip6mr_for_each_table(mrt, net) \
d1db275d
PM
124 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
125
/* Return the multicast routing table with the given id, or NULL. */
static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}
136
/*
 * Resolve a flow to its mr6_table through the ip6mr fib rules.
 * Returns 0 with *mrt set, or a negative errno from the rule walk.
 */
static int ip6mr_fib_lookup(struct net *net, struct flowi *flp,
			    struct mr6_table **mrt)
{
	struct ip6mr_result res;
	struct fib_lookup_arg arg = { .result = &res, };
	int err;

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops, flp, 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
150
/*
 * fib_rules .action callback: translate a matched rule into a table.
 * On FR_ACT_TO_TBL the table is looked up and stored in the result;
 * other actions map to the corresponding error code.
 */
static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr6_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (mrt == NULL)
		return -EAGAIN;		/* table not instantiated: keep walking rules */
	res->mrt = mrt;
	return 0;
}
175
/* .match callback: ip6mr rules have no selectors, everything matches. */
static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}
180
/* Netlink policy: only the generic fib_rule attributes are accepted. */
static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};
184
/* .configure callback: no private attributes to parse. */
static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}
190
/* .compare callback: no private attributes, any two rules compare equal. */
static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}
196
/* .fill callback: dump an empty selector (no dst/src/tos match). */
static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos = 0;
	return 0;
}
205
/* fib_rules ops template, cloned per namespace by fib_rules_register(). */
static const struct fib_rules_ops __net_initdata ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};
220
221static int __net_init ip6mr_rules_init(struct net *net)
222{
223 struct fib_rules_ops *ops;
224 struct mr6_table *mrt;
225 int err;
226
227 ops = fib_rules_register(&ip6mr_rules_ops_template, net);
228 if (IS_ERR(ops))
229 return PTR_ERR(ops);
230
231 INIT_LIST_HEAD(&net->ipv6.mr6_tables);
232
233 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
234 if (mrt == NULL) {
235 err = -ENOMEM;
236 goto err1;
237 }
238
239 err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
240 if (err < 0)
241 goto err2;
242
243 net->ipv6.mr6_rules_ops = ops;
244 return 0;
245
246err2:
247 kfree(mrt);
248err1:
249 fib_rules_unregister(ops);
250 return err;
251}
252
/* Per-namespace exit: free every table, then drop the rules engine. */
static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr6_table *mrt, *next;

	/* _safe iteration: ip6mr_free_table() frees the node we stand on */
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list)
		ip6mr_free_table(mrt);
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
}
261#else
262#define ip6mr_for_each_table(mrt, net) \
263 for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
264
/* Single-table build: one table per namespace, id is ignored. */
static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}
269
/* Single-table build: every flow resolves to the one table. */
static int ip6mr_fib_lookup(struct net *net, struct flowi *flp,
			    struct mr6_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}
276
/* Single-table build: just allocate the default table. */
static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}
282
/* Single-table build: free the default table. */
static void __net_exit ip6mr_rules_exit(struct net *net)
{
	ip6mr_free_table(net->ipv6.mrt6);
}
287#endif
288
/*
 * Return the table with the given id, creating it if it does not exist.
 * A newly created table is fully initialized and (in the multi-table
 * build) linked onto net->ipv6.mr6_tables.  Returns NULL on OOM.
 */
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;
	unsigned int i;

	mrt = ip6mr_get_table(net, id);
	if (mrt != NULL)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (mrt == NULL)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* Forwarding cache: one empty list head per hash bucket */
	for (i = 0; i < MFC6_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);

	/* timer is armed lazily, only while unresolved entries exist */
	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IPV6_PIMSM_V2
	mrt->mroute_reg_vif_num = -1;	/* no pim6reg device yet */
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
	return mrt;
}
7bc570c8 321
d1db275d
PM
322static void ip6mr_free_table(struct mr6_table *mrt)
323{
324 del_timer(&mrt->ipmr_expire_timer);
325 mroute_clean_tables(mrt);
326 kfree(mrt);
327}
7bc570c8
YH
328
329#ifdef CONFIG_PROC_FS
330
/* seq_file iterator state for /proc/net/ip6_mr_cache. */
struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;		/* table being dumped */
	struct list_head *cache;	/* current bucket, or the unresolved queue */
	int ct;				/* current hash-bucket index */
};
337
338
8b90fc7e
BT
/*
 * Position the iterator on the pos'th cache entry: first the resolved
 * hash buckets (under mrt_lock), then the unresolved queue (under
 * mfc_unres_lock).  NOTE: returns with the relevant lock still held;
 * ipmr_mfc_seq_stop() releases it based on it->cache.
 */
static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr6_table *mrt = it->mrt;
	struct mfc6_cache *mfc;

	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;	/* mrt_lock stays held */
	}
	read_unlock(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc6_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;		/* mfc_unres_lock stays held */
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
364
7bc570c8
YH
365/*
366 * The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
367 */
368
/* seq_file iterator state for /proc/net/ip6_mr_vif. */
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;	/* table being dumped */
	int ct;			/* current vif index */
};
374
8b90fc7e
BT
/*
 * Return the pos'th in-use vif of the table, or NULL when exhausted.
 * Called with mrt_lock held (taken in ip6mr_vif_seq_start()).
 */
static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	struct mr6_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;	/* skip holes in the vif table */
		if (pos-- == 0)
			return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}
389
/* seq_file .start: take mrt_lock and seek to *pos.  /proc shows only the
 * default table. */
static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
407
/* seq_file .next: advance to the following in-use vif. */
static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}
425
/* seq_file .stop: drop the lock taken in ip6mr_vif_seq_start(). */
static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}
431
/* seq_file .show: one header line, then one line of counters per vif. */
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr6_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
			   vif - mrt->vif6_table,	/* vif index via pointer arithmetic */
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}
453
/* seq_file operations for /proc/net/ip6_mr_vif. */
static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};
460
/* open() for /proc/net/ip6_mr_vif: per-netns seq_file with iterator state. */
static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}
466
/* file_operations for /proc/net/ip6_mr_vif. */
static const struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ip6mr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
474
/* seq_file .start for /proc/net/ip6_mr_cache; locking is handled inside
 * ipmr_mfc_seq_idx().  /proc shows only the default table. */
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (mrt == NULL)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
489
/*
 * seq_file .next: walk resolved buckets first, then hand over from
 * mrt_lock to mfc_unres_lock when switching to the unresolved queue.
 * The lock held on return is tracked through it->cache.
 */
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	/* more entries in the current list? */
	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc6_cache, list);

	if (it->cache == &mrt->mfc6_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);

	while (++it->ct < MFC6_LINES) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc6_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc6_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc6_cache, list);

 end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}
532
/* seq_file .stop: release whichever lock the iterator still holds,
 * determined by where it->cache points. */
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr6_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc6_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mrt->mfc6_cache_array)
		read_unlock(&mrt_lock);
}
543
/* seq_file .show: one line per cache entry; unresolved entries have no
 * packet/byte counters and print zeros instead. */
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		struct mr6_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &mrt->mfc6_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			/* one "vif:ttl" column per forwarding target */
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
585
/* seq_file operations for /proc/net/ip6_mr_cache. */
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};
592
/* open() for /proc/net/ip6_mr_cache: per-netns seq_file with iterator. */
static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}
598
/* file_operations for /proc/net/ip6_mr_cache. */
static const struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
606#endif
607
14fb64e1 608#ifdef CONFIG_IPV6_PIMSM_V2
14fb64e1
YH
609
/*
 * Receive handler for PIM register messages (IPPROTO_PIM).  Validates
 * the register header and checksum, unwraps the encapsulated IPv6
 * packet and re-injects it through the pim6reg device.  Always consumes
 * the skb and returns 0.
 */
static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi fl = {
		.iif	= skb->dev->ifindex,
		.mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	/* must be a PIMv2 REGISTER, not a null-register, with a valid
	 * checksum (either over the header alone or the whole packet) */
	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);	/* keep device alive after dropping the lock */
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	/* strip the outer headers and hand the inner packet back to the stack */
	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;

	skb_tunnel_rx(skb, reg_dev);

	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
674
/* inet6 protocol handler for IPPROTO_PIM. */
static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};
678
679/* Service routines creating virtual interfaces: PIMREG */
680
6fef4c0c
SH
681static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
682 struct net_device *dev)
14fb64e1 683{
8229efda 684 struct net *net = dev_net(dev);
d1db275d
PM
685 struct mr6_table *mrt;
686 struct flowi fl = {
687 .oif = dev->ifindex,
688 .iif = skb->skb_iif,
689 .mark = skb->mark,
690 };
691 int err;
692
693 err = ip6mr_fib_lookup(net, &fl, &mrt);
694 if (err < 0)
695 return err;
8229efda 696
14fb64e1 697 read_lock(&mrt_lock);
dc58c78c
PE
698 dev->stats.tx_bytes += skb->len;
699 dev->stats.tx_packets++;
6bd52143 700 ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
14fb64e1
YH
701 read_unlock(&mrt_lock);
702 kfree_skb(skb);
6ed10654 703 return NETDEV_TX_OK;
14fb64e1
YH
704}
705
007c3838
SH
/* netdev ops for the pim6reg pseudo-device: transmit only. */
static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};
709
14fb64e1
YH
/* alloc_netdev() setup callback for the pim6reg pseudo-device. */
static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	/* room for the outer IPv6 header plus the 8-byte PIM register header */
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
719
d1db275d 720static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
14fb64e1
YH
721{
722 struct net_device *dev;
d1db275d
PM
723 char name[IFNAMSIZ];
724
725 if (mrt->id == RT6_TABLE_DFLT)
726 sprintf(name, "pim6reg");
727 else
728 sprintf(name, "pim6reg%u", mrt->id);
14fb64e1 729
d1db275d 730 dev = alloc_netdev(0, name, reg_vif_setup);
14fb64e1
YH
731 if (dev == NULL)
732 return NULL;
733
8229efda
BT
734 dev_net_set(dev, net);
735
14fb64e1
YH
736 if (register_netdevice(dev)) {
737 free_netdev(dev);
738 return NULL;
739 }
740 dev->iflink = 0;
741
14fb64e1
YH
742 if (dev_open(dev))
743 goto failure;
744
7af3db78 745 dev_hold(dev);
14fb64e1
YH
746 return dev;
747
748failure:
749 /* allow the register to be completed before unregistering. */
750 rtnl_unlock();
751 rtnl_lock();
752
753 unregister_netdevice(dev);
754 return NULL;
755}
756#endif
757
7bc570c8
YH
758/*
759 * Delete a VIF entry
760 */
761
/*
 * Delete a VIF entry
 *
 * Clears vif slot @vifi of @mrt, shrinks maxvif if the top slot was
 * removed, drops allmulti/mc_forwarding on the device and, for a
 * MIFF_REGISTER vif, queues the pim6reg device for unregistration on
 * @head.  Returns 0 or -EADDRNOTAVAIL for an empty/out-of-range slot.
 */
static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
{
	struct mif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;		/* slot is free from here on */

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	/* removed the highest slot: scan down for the new maxvif */
	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev)
		in6_dev->cnf.mc_forwarding--;

	if (v->flags & MIFF_REGISTER)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
810
58701ad4
BT
/* Return a cache entry to the mrt_cachep slab. */
static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	kmem_cache_free(mrt_cachep, c);
}
815
7bc570c8
YH
816/* Destroy an unresolved cache entry, killing queued skbs
817 and reporting error to netlink readers.
818 */
819
/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		/* version 0 marks a netlink request parked on the queue:
		 * answer it with -ETIMEDOUT instead of just freeing it */
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}
841
842
c476efbc 843/* Timer process for all the unresolved queue. */
7bc570c8 844
/*
 * Walk the unresolved queue destroying expired entries, and re-arm the
 * timer for the earliest remaining expiry.  Caller holds mfc_unres_lock.
 */
static void ipmr_do_expire_process(struct mr6_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;	/* upper bound on the re-arm delay */
	struct mfc6_cache *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		ip6mr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}
867
/* Timer callback: try to reap the unresolved queue; if the lock is
 * contended, retry one jiffy later instead of spinning in irq context. */
static void ipmr_expire_process(unsigned long arg)
{
	struct mr6_table *mrt = (struct mr6_table *)arg;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}
882
883/* Fill oifs list. It is called under write locked mrt_lock. */
884
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);	/* 255 == "do not forward" */

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (MIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			/* track the [minvif, maxvif) range of active oifs */
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
905
6bd52143
PM
/*
 * Install a new virtual interface in slot vifc->mif6c_mifi.
 * For MIFF_REGISTER the pim6reg device is created; otherwise the
 * physical device named by mif6c_pifi is used.  The device reference
 * taken here is dropped by mif6_delete().  Returns 0 or a negative errno.
 */
static int mif6_add(struct net *net, struct mr6_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (MIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;	/* only one register vif per table */
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev)
		in6_dev->cnf.mc_forwarding++;

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;	/* survives daemon socket close */
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;		/* publishes the slot (MIF_EXISTS becomes true) */
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
985
/* Look up a resolved cache entry by (origin, group), or return NULL.
 * Caller must hold mrt_lock. */
static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
					   struct in6_addr *origin,
					   struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			return c;
	}
	return NULL;
}
1000
1001/*
1002 * Allocate a multicast cache entry
1003 */
/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	c->mfc_un.res.minvif = MAXMIFS;	/* empty oif range until thresholds set */
	return c;
}
1012
/* Allocate an unresolved cache entry (atomic context: packet path). */
static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;	/* resolve deadline */
	return c;
}
1022
1023/*
1024 * A cache entry has gone into a resolved state from queued
1025 */
1026
6bd52143
PM
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		/* version 0 marks a parked netlink RTM_GETROUTE request:
		 * answer it with the now-resolved route */
		if (ipv6_hdr(skb)->version == 0) {
			int err;
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

			if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
			}
			err = rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else
			ip6_mr_forward(net, mrt, skb, c);	/* normal data packet */
	}
}
1054
1055/*
1056 * Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
1057 * expects the following bizarre scheme.
1058 *
1059 * Called under mrt_lock.
1060 */
1061
6bd52143
PM
/*
 * Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
 * expects the following bizarre scheme.
 *
 * Called under mrt_lock.
 */

static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		/* whole packet is forwarded to the daemon: copy it with
		 * headroom for the mrt6msg header */
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
					   +sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
		/*
		 *	Copy the IP header
		 */

		skb_put(skb, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb);
		skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

		/*
		 *	Add our header
		 */
		skb_put(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);

		msg->im6_mbz = 0;
		msg->im6_msgtype = assert;
		msg->im6_mif = mifi;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (mrt->mroute6_sk == NULL) {
		/* no daemon listening: nothing to report to */
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
	if (ret < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}
1151
1152/*
1153 * Queue a packet for resolution. It gets locked cache entry!
1154 */
1155
1156static int
6bd52143 1157ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
7bc570c8 1158{
f30a7784 1159 bool found = false;
7bc570c8
YH
1160 int err;
1161 struct mfc6_cache *c;
1162
1163 spin_lock_bh(&mfc_unres_lock);
6bd52143 1164 list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
c476efbc 1165 if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
f30a7784
PM
1166 ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1167 found = true;
7bc570c8 1168 break;
f30a7784 1169 }
7bc570c8
YH
1170 }
1171
f30a7784 1172 if (!found) {
7bc570c8
YH
1173 /*
1174 * Create a new entry if allowable
1175 */
1176
6bd52143 1177 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
b5aa30b1 1178 (c = ip6mr_cache_alloc_unres()) == NULL) {
7bc570c8
YH
1179 spin_unlock_bh(&mfc_unres_lock);
1180
1181 kfree_skb(skb);
1182 return -ENOBUFS;
1183 }
1184
1185 /*
1186 * Fill in the new cache entry
1187 */
1188 c->mf6c_parent = -1;
1189 c->mf6c_origin = ipv6_hdr(skb)->saddr;
1190 c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1191
1192 /*
1193 * Reflect first query at pim6sd
1194 */
6bd52143 1195 err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
8229efda 1196 if (err < 0) {
7bc570c8
YH
1197 /* If the report failed throw the cache entry
1198 out - Brad Parker
1199 */
1200 spin_unlock_bh(&mfc_unres_lock);
1201
58701ad4 1202 ip6mr_cache_free(c);
7bc570c8
YH
1203 kfree_skb(skb);
1204 return err;
1205 }
1206
6bd52143
PM
1207 atomic_inc(&mrt->cache_resolve_queue_len);
1208 list_add(&c->list, &mrt->mfc6_unres_queue);
7bc570c8 1209
6bd52143 1210 ipmr_do_expire_process(mrt);
7bc570c8
YH
1211 }
1212
1213 /*
1214 * See if we can append the packet
1215 */
1216 if (c->mfc_un.unres.unresolved.qlen > 3) {
1217 kfree_skb(skb);
1218 err = -ENOBUFS;
1219 } else {
1220 skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
1221 err = 0;
1222 }
1223
1224 spin_unlock_bh(&mfc_unres_lock);
1225 return err;
1226}
1227
1228/*
1229 * MFC6 cache manipulation by user space
1230 */
1231
6bd52143 1232static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc)
7bc570c8
YH
1233{
1234 int line;
f30a7784 1235 struct mfc6_cache *c, *next;
7bc570c8
YH
1236
1237 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1238
6bd52143 1239 list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
7bc570c8
YH
1240 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
1241 ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
1242 write_lock_bh(&mrt_lock);
f30a7784 1243 list_del(&c->list);
7bc570c8
YH
1244 write_unlock_bh(&mrt_lock);
1245
58701ad4 1246 ip6mr_cache_free(c);
7bc570c8
YH
1247 return 0;
1248 }
1249 }
1250 return -ENOENT;
1251}
1252
1253static int ip6mr_device_event(struct notifier_block *this,
1254 unsigned long event, void *ptr)
1255{
1256 struct net_device *dev = ptr;
8229efda 1257 struct net *net = dev_net(dev);
d1db275d 1258 struct mr6_table *mrt;
7bc570c8
YH
1259 struct mif_device *v;
1260 int ct;
c871e664 1261 LIST_HEAD(list);
7bc570c8 1262
7bc570c8
YH
1263 if (event != NETDEV_UNREGISTER)
1264 return NOTIFY_DONE;
1265
d1db275d
PM
1266 ip6mr_for_each_table(mrt, net) {
1267 v = &mrt->vif6_table[0];
1268 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1269 if (v->dev == dev)
1270 mif6_delete(mrt, ct, &list);
1271 }
7bc570c8 1272 }
c871e664
ED
1273 unregister_netdevice_many(&list);
1274
7bc570c8
YH
1275 return NOTIFY_DONE;
1276}
1277
1278static struct notifier_block ip6_mr_notifier = {
1279 .notifier_call = ip6mr_device_event
1280};
1281
1282/*
1283 * Setup for IP multicast routing
1284 */
1285
4e16880c
BT
1286static int __net_init ip6mr_net_init(struct net *net)
1287{
d1db275d 1288 int err;
f30a7784 1289
d1db275d
PM
1290 err = ip6mr_rules_init(net);
1291 if (err < 0)
4e16880c 1292 goto fail;
8b90fc7e
BT
1293
1294#ifdef CONFIG_PROC_FS
1295 err = -ENOMEM;
1296 if (!proc_net_fops_create(net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
1297 goto proc_vif_fail;
1298 if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops))
1299 goto proc_cache_fail;
1300#endif
6bd52143 1301
4a6258a0
BT
1302 return 0;
1303
8b90fc7e
BT
1304#ifdef CONFIG_PROC_FS
1305proc_cache_fail:
1306 proc_net_remove(net, "ip6_mr_vif");
1307proc_vif_fail:
d1db275d 1308 ip6mr_rules_exit(net);
8b90fc7e 1309#endif
4e16880c
BT
1310fail:
1311 return err;
1312}
1313
1314static void __net_exit ip6mr_net_exit(struct net *net)
1315{
8b90fc7e
BT
1316#ifdef CONFIG_PROC_FS
1317 proc_net_remove(net, "ip6_mr_cache");
1318 proc_net_remove(net, "ip6_mr_vif");
1319#endif
d1db275d 1320 ip6mr_rules_exit(net);
4e16880c
BT
1321}
1322
1323static struct pernet_operations ip6mr_net_ops = {
1324 .init = ip6mr_net_init,
1325 .exit = ip6mr_net_exit,
1326};
1327
623d1a1a 1328int __init ip6_mr_init(void)
7bc570c8 1329{
623d1a1a
WC
1330 int err;
1331
7bc570c8
YH
1332 mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1333 sizeof(struct mfc6_cache),
1334 0, SLAB_HWCACHE_ALIGN,
1335 NULL);
1336 if (!mrt_cachep)
623d1a1a 1337 return -ENOMEM;
7bc570c8 1338
4e16880c
BT
1339 err = register_pernet_subsys(&ip6mr_net_ops);
1340 if (err)
1341 goto reg_pernet_fail;
1342
623d1a1a
WC
1343 err = register_netdevice_notifier(&ip6_mr_notifier);
1344 if (err)
1345 goto reg_notif_fail;
403dbb97
TG
1346#ifdef CONFIG_IPV6_PIMSM_V2
1347 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1348 printk(KERN_ERR "ip6_mr_init: can't add PIM protocol\n");
1349 err = -EAGAIN;
1350 goto add_proto_fail;
1351 }
1352#endif
5b285cac 1353 rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL, ip6mr_rtm_dumproute);
623d1a1a 1354 return 0;
403dbb97
TG
1355#ifdef CONFIG_IPV6_PIMSM_V2
1356add_proto_fail:
1357 unregister_netdevice_notifier(&ip6_mr_notifier);
1358#endif
87b30a65 1359reg_notif_fail:
4e16880c
BT
1360 unregister_pernet_subsys(&ip6mr_net_ops);
1361reg_pernet_fail:
87b30a65 1362 kmem_cache_destroy(mrt_cachep);
623d1a1a 1363 return err;
7bc570c8
YH
1364}
1365
623d1a1a
WC
1366void ip6_mr_cleanup(void)
1367{
623d1a1a 1368 unregister_netdevice_notifier(&ip6_mr_notifier);
4e16880c 1369 unregister_pernet_subsys(&ip6mr_net_ops);
623d1a1a
WC
1370 kmem_cache_destroy(mrt_cachep);
1371}
7bc570c8 1372
6bd52143
PM
1373static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
1374 struct mf6cctl *mfc, int mrtsock)
7bc570c8 1375{
f30a7784 1376 bool found = false;
7bc570c8 1377 int line;
f30a7784 1378 struct mfc6_cache *uc, *c;
6ac7eb08 1379 unsigned char ttls[MAXMIFS];
7bc570c8
YH
1380 int i;
1381
a50436f2
PM
1382 if (mfc->mf6cc_parent >= MAXMIFS)
1383 return -ENFILE;
1384
6ac7eb08
RR
1385 memset(ttls, 255, MAXMIFS);
1386 for (i = 0; i < MAXMIFS; i++) {
7bc570c8
YH
1387 if (IF_ISSET(i, &mfc->mf6cc_ifset))
1388 ttls[i] = 1;
1389
1390 }
1391
1392 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1393
6bd52143 1394 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
7bc570c8 1395 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
f30a7784
PM
1396 ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
1397 found = true;
7bc570c8 1398 break;
f30a7784 1399 }
7bc570c8
YH
1400 }
1401
f30a7784 1402 if (found) {
7bc570c8
YH
1403 write_lock_bh(&mrt_lock);
1404 c->mf6c_parent = mfc->mf6cc_parent;
6bd52143 1405 ip6mr_update_thresholds(mrt, c, ttls);
7bc570c8
YH
1406 if (!mrtsock)
1407 c->mfc_flags |= MFC_STATIC;
1408 write_unlock_bh(&mrt_lock);
1409 return 0;
1410 }
1411
1412 if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1413 return -EINVAL;
1414
b5aa30b1 1415 c = ip6mr_cache_alloc();
7bc570c8
YH
1416 if (c == NULL)
1417 return -ENOMEM;
1418
1419 c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1420 c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1421 c->mf6c_parent = mfc->mf6cc_parent;
6bd52143 1422 ip6mr_update_thresholds(mrt, c, ttls);
7bc570c8
YH
1423 if (!mrtsock)
1424 c->mfc_flags |= MFC_STATIC;
1425
1426 write_lock_bh(&mrt_lock);
6bd52143 1427 list_add(&c->list, &mrt->mfc6_cache_array[line]);
7bc570c8
YH
1428 write_unlock_bh(&mrt_lock);
1429
1430 /*
1431 * Check to see if we resolved a queued list. If so we
1432 * need to send on the frames and tidy up.
1433 */
f30a7784 1434 found = false;
7bc570c8 1435 spin_lock_bh(&mfc_unres_lock);
6bd52143 1436 list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
c476efbc 1437 if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
7bc570c8 1438 ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
f30a7784 1439 list_del(&uc->list);
6bd52143 1440 atomic_dec(&mrt->cache_resolve_queue_len);
f30a7784 1441 found = true;
7bc570c8
YH
1442 break;
1443 }
1444 }
6bd52143
PM
1445 if (list_empty(&mrt->mfc6_unres_queue))
1446 del_timer(&mrt->ipmr_expire_timer);
7bc570c8
YH
1447 spin_unlock_bh(&mfc_unres_lock);
1448
f30a7784 1449 if (found) {
6bd52143 1450 ip6mr_cache_resolve(net, mrt, uc, c);
58701ad4 1451 ip6mr_cache_free(uc);
7bc570c8
YH
1452 }
1453 return 0;
1454}
1455
1456/*
1457 * Close the multicast socket, and clear the vif tables etc
1458 */
1459
6bd52143 1460static void mroute_clean_tables(struct mr6_table *mrt)
7bc570c8
YH
1461{
1462 int i;
c871e664 1463 LIST_HEAD(list);
f30a7784 1464 struct mfc6_cache *c, *next;
7bc570c8
YH
1465
1466 /*
1467 * Shut down all active vif entries
1468 */
6bd52143
PM
1469 for (i = 0; i < mrt->maxvif; i++) {
1470 if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
1471 mif6_delete(mrt, i, &list);
7bc570c8 1472 }
c871e664 1473 unregister_netdevice_many(&list);
7bc570c8
YH
1474
1475 /*
1476 * Wipe the cache
1477 */
4a6258a0 1478 for (i = 0; i < MFC6_LINES; i++) {
6bd52143 1479 list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
f30a7784 1480 if (c->mfc_flags & MFC_STATIC)
7bc570c8 1481 continue;
7bc570c8 1482 write_lock_bh(&mrt_lock);
f30a7784 1483 list_del(&c->list);
7bc570c8
YH
1484 write_unlock_bh(&mrt_lock);
1485
58701ad4 1486 ip6mr_cache_free(c);
7bc570c8
YH
1487 }
1488 }
1489
6bd52143 1490 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
7bc570c8 1491 spin_lock_bh(&mfc_unres_lock);
6bd52143 1492 list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
f30a7784 1493 list_del(&c->list);
6bd52143 1494 ip6mr_destroy_unres(mrt, c);
7bc570c8
YH
1495 }
1496 spin_unlock_bh(&mfc_unres_lock);
1497 }
1498}
1499
6bd52143 1500static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
7bc570c8
YH
1501{
1502 int err = 0;
8229efda 1503 struct net *net = sock_net(sk);
7bc570c8
YH
1504
1505 rtnl_lock();
1506 write_lock_bh(&mrt_lock);
6bd52143
PM
1507 if (likely(mrt->mroute6_sk == NULL)) {
1508 mrt->mroute6_sk = sk;
1d6e55f1
TG
1509 net->ipv6.devconf_all->mc_forwarding++;
1510 }
7bc570c8
YH
1511 else
1512 err = -EADDRINUSE;
1513 write_unlock_bh(&mrt_lock);
1514
1515 rtnl_unlock();
1516
1517 return err;
1518}
1519
1520int ip6mr_sk_done(struct sock *sk)
1521{
d1db275d 1522 int err = -EACCES;
8229efda 1523 struct net *net = sock_net(sk);
d1db275d 1524 struct mr6_table *mrt;
7bc570c8
YH
1525
1526 rtnl_lock();
d1db275d
PM
1527 ip6mr_for_each_table(mrt, net) {
1528 if (sk == mrt->mroute6_sk) {
1529 write_lock_bh(&mrt_lock);
1530 mrt->mroute6_sk = NULL;
1531 net->ipv6.devconf_all->mc_forwarding--;
1532 write_unlock_bh(&mrt_lock);
7bc570c8 1533
d1db275d
PM
1534 mroute_clean_tables(mrt);
1535 err = 0;
1536 break;
1537 }
1538 }
7bc570c8
YH
1539 rtnl_unlock();
1540
1541 return err;
1542}
1543
d1db275d 1544struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
6bd52143 1545{
d1db275d
PM
1546 struct mr6_table *mrt;
1547 struct flowi fl = {
1548 .iif = skb->skb_iif,
1549 .oif = skb->dev->ifindex,
1550 .mark = skb->mark,
1551 };
1552
1553 if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
1554 return NULL;
6bd52143
PM
1555
1556 return mrt->mroute6_sk;
1557}
1558
7bc570c8
YH
1559/*
1560 * Socket options and virtual interface manipulation. The whole
1561 * virtual interface system is a complete heap, but unfortunately
1562 * that's how BSD mrouted happens to think. Maybe one day with a proper
1563 * MOSPF/PIM router set up we can clean this up.
1564 */
1565
b7058842 1566int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
7bc570c8
YH
1567{
1568 int ret;
1569 struct mif6ctl vif;
1570 struct mf6cctl mfc;
1571 mifi_t mifi;
8229efda 1572 struct net *net = sock_net(sk);
d1db275d
PM
1573 struct mr6_table *mrt;
1574
1575 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1576 if (mrt == NULL)
1577 return -ENOENT;
7bc570c8
YH
1578
1579 if (optname != MRT6_INIT) {
6bd52143 1580 if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN))
7bc570c8
YH
1581 return -EACCES;
1582 }
1583
1584 switch (optname) {
1585 case MRT6_INIT:
1586 if (sk->sk_type != SOCK_RAW ||
c720c7e8 1587 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
7bc570c8
YH
1588 return -EOPNOTSUPP;
1589 if (optlen < sizeof(int))
1590 return -EINVAL;
1591
6bd52143 1592 return ip6mr_sk_init(mrt, sk);
7bc570c8
YH
1593
1594 case MRT6_DONE:
1595 return ip6mr_sk_done(sk);
1596
1597 case MRT6_ADD_MIF:
1598 if (optlen < sizeof(vif))
1599 return -EINVAL;
1600 if (copy_from_user(&vif, optval, sizeof(vif)))
1601 return -EFAULT;
6ac7eb08 1602 if (vif.mif6c_mifi >= MAXMIFS)
7bc570c8
YH
1603 return -ENFILE;
1604 rtnl_lock();
6bd52143 1605 ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
7bc570c8
YH
1606 rtnl_unlock();
1607 return ret;
1608
1609 case MRT6_DEL_MIF:
1610 if (optlen < sizeof(mifi_t))
1611 return -EINVAL;
1612 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1613 return -EFAULT;
1614 rtnl_lock();
6bd52143 1615 ret = mif6_delete(mrt, mifi, NULL);
7bc570c8
YH
1616 rtnl_unlock();
1617 return ret;
1618
1619 /*
1620 * Manipulate the forwarding caches. These live
1621 * in a sort of kernel/user symbiosis.
1622 */
1623 case MRT6_ADD_MFC:
1624 case MRT6_DEL_MFC:
1625 if (optlen < sizeof(mfc))
1626 return -EINVAL;
1627 if (copy_from_user(&mfc, optval, sizeof(mfc)))
1628 return -EFAULT;
1629 rtnl_lock();
1630 if (optname == MRT6_DEL_MFC)
6bd52143 1631 ret = ip6mr_mfc_delete(mrt, &mfc);
7bc570c8 1632 else
6bd52143 1633 ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk);
7bc570c8
YH
1634 rtnl_unlock();
1635 return ret;
1636
14fb64e1
YH
1637 /*
1638 * Control PIM assert (to activate pim will activate assert)
1639 */
1640 case MRT6_ASSERT:
1641 {
1642 int v;
1643 if (get_user(v, (int __user *)optval))
1644 return -EFAULT;
6bd52143 1645 mrt->mroute_do_assert = !!v;
14fb64e1
YH
1646 return 0;
1647 }
1648
1649#ifdef CONFIG_IPV6_PIMSM_V2
1650 case MRT6_PIM:
1651 {
a9f83bf3 1652 int v;
14fb64e1
YH
1653 if (get_user(v, (int __user *)optval))
1654 return -EFAULT;
1655 v = !!v;
1656 rtnl_lock();
1657 ret = 0;
6bd52143
PM
1658 if (v != mrt->mroute_do_pim) {
1659 mrt->mroute_do_pim = v;
1660 mrt->mroute_do_assert = v;
14fb64e1
YH
1661 }
1662 rtnl_unlock();
1663 return ret;
1664 }
1665
d1db275d
PM
1666#endif
1667#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1668 case MRT6_TABLE:
1669 {
1670 u32 v;
1671
1672 if (optlen != sizeof(u32))
1673 return -EINVAL;
1674 if (get_user(v, (u32 __user *)optval))
1675 return -EFAULT;
1676 if (sk == mrt->mroute6_sk)
1677 return -EBUSY;
1678
1679 rtnl_lock();
1680 ret = 0;
1681 if (!ip6mr_new_table(net, v))
1682 ret = -ENOMEM;
1683 raw6_sk(sk)->ip6mr_table = v;
1684 rtnl_unlock();
1685 return ret;
1686 }
14fb64e1 1687#endif
7bc570c8 1688 /*
7d120c55 1689 * Spurious command, or MRT6_VERSION which you cannot
7bc570c8
YH
1690 * set.
1691 */
1692 default:
1693 return -ENOPROTOOPT;
1694 }
1695}
1696
1697/*
1698 * Getsock opt support for the multicast routing system.
1699 */
1700
1701int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1702 int __user *optlen)
1703{
1704 int olr;
1705 int val;
8229efda 1706 struct net *net = sock_net(sk);
d1db275d
PM
1707 struct mr6_table *mrt;
1708
1709 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1710 if (mrt == NULL)
1711 return -ENOENT;
7bc570c8
YH
1712
1713 switch (optname) {
1714 case MRT6_VERSION:
1715 val = 0x0305;
1716 break;
14fb64e1
YH
1717#ifdef CONFIG_IPV6_PIMSM_V2
1718 case MRT6_PIM:
6bd52143 1719 val = mrt->mroute_do_pim;
14fb64e1
YH
1720 break;
1721#endif
1722 case MRT6_ASSERT:
6bd52143 1723 val = mrt->mroute_do_assert;
14fb64e1 1724 break;
7bc570c8
YH
1725 default:
1726 return -ENOPROTOOPT;
1727 }
1728
1729 if (get_user(olr, optlen))
1730 return -EFAULT;
1731
1732 olr = min_t(int, olr, sizeof(int));
1733 if (olr < 0)
1734 return -EINVAL;
1735
1736 if (put_user(olr, optlen))
1737 return -EFAULT;
1738 if (copy_to_user(optval, &val, olr))
1739 return -EFAULT;
1740 return 0;
1741}
1742
1743/*
1744 * The IP multicast ioctl support routines.
1745 */
1746
1747int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1748{
1749 struct sioc_sg_req6 sr;
1750 struct sioc_mif_req6 vr;
1751 struct mif_device *vif;
1752 struct mfc6_cache *c;
8229efda 1753 struct net *net = sock_net(sk);
d1db275d
PM
1754 struct mr6_table *mrt;
1755
1756 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1757 if (mrt == NULL)
1758 return -ENOENT;
7bc570c8
YH
1759
1760 switch (cmd) {
1761 case SIOCGETMIFCNT_IN6:
1762 if (copy_from_user(&vr, arg, sizeof(vr)))
1763 return -EFAULT;
6bd52143 1764 if (vr.mifi >= mrt->maxvif)
7bc570c8
YH
1765 return -EINVAL;
1766 read_lock(&mrt_lock);
6bd52143
PM
1767 vif = &mrt->vif6_table[vr.mifi];
1768 if (MIF_EXISTS(mrt, vr.mifi)) {
7bc570c8
YH
1769 vr.icount = vif->pkt_in;
1770 vr.ocount = vif->pkt_out;
1771 vr.ibytes = vif->bytes_in;
1772 vr.obytes = vif->bytes_out;
1773 read_unlock(&mrt_lock);
1774
1775 if (copy_to_user(arg, &vr, sizeof(vr)))
1776 return -EFAULT;
1777 return 0;
1778 }
1779 read_unlock(&mrt_lock);
1780 return -EADDRNOTAVAIL;
1781 case SIOCGETSGCNT_IN6:
1782 if (copy_from_user(&sr, arg, sizeof(sr)))
1783 return -EFAULT;
1784
1785 read_lock(&mrt_lock);
6bd52143 1786 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
7bc570c8
YH
1787 if (c) {
1788 sr.pktcnt = c->mfc_un.res.pkt;
1789 sr.bytecnt = c->mfc_un.res.bytes;
1790 sr.wrong_if = c->mfc_un.res.wrong_if;
1791 read_unlock(&mrt_lock);
1792
1793 if (copy_to_user(arg, &sr, sizeof(sr)))
1794 return -EFAULT;
1795 return 0;
1796 }
1797 read_unlock(&mrt_lock);
1798 return -EADDRNOTAVAIL;
1799 default:
1800 return -ENOIOCTLCMD;
1801 }
1802}
1803
1804
1805static inline int ip6mr_forward2_finish(struct sk_buff *skb)
1806{
adf30907 1807 IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
483a47d2 1808 IPSTATS_MIB_OUTFORWDATAGRAMS);
7bc570c8
YH
1809 return dst_output(skb);
1810}
1811
1812/*
1813 * Processing handlers for ip6mr_forward
1814 */
1815
6bd52143
PM
1816static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
1817 struct sk_buff *skb, struct mfc6_cache *c, int vifi)
7bc570c8
YH
1818{
1819 struct ipv6hdr *ipv6h;
6bd52143 1820 struct mif_device *vif = &mrt->vif6_table[vifi];
7bc570c8
YH
1821 struct net_device *dev;
1822 struct dst_entry *dst;
1823 struct flowi fl;
1824
1825 if (vif->dev == NULL)
1826 goto out_free;
1827
14fb64e1
YH
1828#ifdef CONFIG_IPV6_PIMSM_V2
1829 if (vif->flags & MIFF_REGISTER) {
1830 vif->pkt_out++;
1831 vif->bytes_out += skb->len;
dc58c78c
PE
1832 vif->dev->stats.tx_bytes += skb->len;
1833 vif->dev->stats.tx_packets++;
6bd52143 1834 ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
8da73b73 1835 goto out_free;
14fb64e1
YH
1836 }
1837#endif
1838
7bc570c8
YH
1839 ipv6h = ipv6_hdr(skb);
1840
1841 fl = (struct flowi) {
1842 .oif = vif->link,
1843 .nl_u = { .ip6_u =
1844 { .daddr = ipv6h->daddr, }
1845 }
1846 };
1847
8229efda 1848 dst = ip6_route_output(net, NULL, &fl);
7bc570c8
YH
1849 if (!dst)
1850 goto out_free;
1851
adf30907
ED
1852 skb_dst_drop(skb);
1853 skb_dst_set(skb, dst);
7bc570c8
YH
1854
1855 /*
1856 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
1857 * not only before forwarding, but after forwarding on all output
1858 * interfaces. It is clear, if mrouter runs a multicasting
1859 * program, it should receive packets not depending to what interface
1860 * program is joined.
1861 * If we will not make it, the program will have to join on all
1862 * interfaces. On the other hand, multihoming host (or router, but
1863 * not mrouter) cannot join to more than one interface - it will
1864 * result in receiving multiple packets.
1865 */
1866 dev = vif->dev;
1867 skb->dev = dev;
1868 vif->pkt_out++;
1869 vif->bytes_out += skb->len;
1870
1871 /* We are about to write */
1872 /* XXX: extension headers? */
1873 if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
1874 goto out_free;
1875
1876 ipv6h = ipv6_hdr(skb);
1877 ipv6h->hop_limit--;
1878
1879 IP6CB(skb)->flags |= IP6SKB_FORWARDED;
1880
b2e0b385 1881 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
7bc570c8
YH
1882 ip6mr_forward2_finish);
1883
1884out_free:
1885 kfree_skb(skb);
1886 return 0;
1887}
1888
6bd52143 1889static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
7bc570c8
YH
1890{
1891 int ct;
6bd52143
PM
1892
1893 for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
1894 if (mrt->vif6_table[ct].dev == dev)
7bc570c8
YH
1895 break;
1896 }
1897 return ct;
1898}
1899
6bd52143
PM
1900static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
1901 struct sk_buff *skb, struct mfc6_cache *cache)
7bc570c8
YH
1902{
1903 int psend = -1;
1904 int vif, ct;
1905
1906 vif = cache->mf6c_parent;
1907 cache->mfc_un.res.pkt++;
1908 cache->mfc_un.res.bytes += skb->len;
1909
14fb64e1
YH
1910 /*
1911 * Wrong interface: drop packet and (maybe) send PIM assert.
1912 */
6bd52143 1913 if (mrt->vif6_table[vif].dev != skb->dev) {
14fb64e1
YH
1914 int true_vifi;
1915
1916 cache->mfc_un.res.wrong_if++;
6bd52143 1917 true_vifi = ip6mr_find_vif(mrt, skb->dev);
14fb64e1 1918
6bd52143 1919 if (true_vifi >= 0 && mrt->mroute_do_assert &&
14fb64e1
YH
1920 /* pimsm uses asserts, when switching from RPT to SPT,
1921 so that we cannot check that packet arrived on an oif.
1922 It is bad, but otherwise we would need to move pretty
1923 large chunk of pimd to kernel. Ough... --ANK
1924 */
6bd52143 1925 (mrt->mroute_do_pim ||
a21f3f99 1926 cache->mfc_un.res.ttls[true_vifi] < 255) &&
14fb64e1
YH
1927 time_after(jiffies,
1928 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1929 cache->mfc_un.res.last_assert = jiffies;
6bd52143 1930 ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
14fb64e1
YH
1931 }
1932 goto dont_forward;
1933 }
1934
6bd52143
PM
1935 mrt->vif6_table[vif].pkt_in++;
1936 mrt->vif6_table[vif].bytes_in += skb->len;
7bc570c8
YH
1937
1938 /*
1939 * Forward the frame
1940 */
1941 for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
1942 if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
1943 if (psend != -1) {
1944 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1945 if (skb2)
6bd52143 1946 ip6mr_forward2(net, mrt, skb2, cache, psend);
7bc570c8
YH
1947 }
1948 psend = ct;
1949 }
1950 }
1951 if (psend != -1) {
6bd52143 1952 ip6mr_forward2(net, mrt, skb, cache, psend);
7bc570c8
YH
1953 return 0;
1954 }
1955
14fb64e1 1956dont_forward:
7bc570c8
YH
1957 kfree_skb(skb);
1958 return 0;
1959}
1960
1961
1962/*
1963 * Multicast packets for forwarding arrive here
1964 */
1965
1966int ip6_mr_input(struct sk_buff *skb)
1967{
1968 struct mfc6_cache *cache;
8229efda 1969 struct net *net = dev_net(skb->dev);
d1db275d
PM
1970 struct mr6_table *mrt;
1971 struct flowi fl = {
1972 .iif = skb->dev->ifindex,
1973 .mark = skb->mark,
1974 };
1975 int err;
1976
1977 err = ip6mr_fib_lookup(net, &fl, &mrt);
1978 if (err < 0)
1979 return err;
7bc570c8
YH
1980
1981 read_lock(&mrt_lock);
6bd52143 1982 cache = ip6mr_cache_find(mrt,
8229efda 1983 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
7bc570c8
YH
1984
1985 /*
1986 * No usable cache entry
1987 */
1988 if (cache == NULL) {
1989 int vif;
1990
6bd52143 1991 vif = ip6mr_find_vif(mrt, skb->dev);
7bc570c8 1992 if (vif >= 0) {
6bd52143 1993 int err = ip6mr_cache_unresolved(mrt, vif, skb);
7bc570c8
YH
1994 read_unlock(&mrt_lock);
1995
1996 return err;
1997 }
1998 read_unlock(&mrt_lock);
1999 kfree_skb(skb);
2000 return -ENODEV;
2001 }
2002
6bd52143 2003 ip6_mr_forward(net, mrt, skb, cache);
7bc570c8
YH
2004
2005 read_unlock(&mrt_lock);
2006
2007 return 0;
2008}
2009
2010
5b285cac
PM
2011static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2012 struct mfc6_cache *c, struct rtmsg *rtm)
7bc570c8
YH
2013{
2014 int ct;
2015 struct rtnexthop *nhp;
549e028d 2016 u8 *b = skb_tail_pointer(skb);
7bc570c8
YH
2017 struct rtattr *mp_head;
2018
7438189b 2019 /* If cache is unresolved, don't try to parse IIF and OIF */
ed0f160a 2020 if (c->mf6c_parent >= MAXMIFS)
7438189b
ND
2021 return -ENOENT;
2022
6bd52143
PM
2023 if (MIF_EXISTS(mrt, c->mf6c_parent))
2024 RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex);
7bc570c8
YH
2025
2026 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
2027
2028 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
6bd52143 2029 if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
7bc570c8
YH
2030 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
2031 goto rtattr_failure;
2032 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
2033 nhp->rtnh_flags = 0;
2034 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
6bd52143 2035 nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
7bc570c8
YH
2036 nhp->rtnh_len = sizeof(*nhp);
2037 }
2038 }
2039 mp_head->rta_type = RTA_MULTIPATH;
549e028d 2040 mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
7bc570c8
YH
2041 rtm->rtm_type = RTN_MULTICAST;
2042 return 1;
2043
2044rtattr_failure:
2045 nlmsg_trim(skb, b);
2046 return -EMSGSIZE;
2047}
2048
8229efda
BT
2049int ip6mr_get_route(struct net *net,
2050 struct sk_buff *skb, struct rtmsg *rtm, int nowait)
7bc570c8
YH
2051{
2052 int err;
d1db275d 2053 struct mr6_table *mrt;
7bc570c8 2054 struct mfc6_cache *cache;
adf30907 2055 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
7bc570c8 2056
d1db275d
PM
2057 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2058 if (mrt == NULL)
2059 return -ENOENT;
2060
7bc570c8 2061 read_lock(&mrt_lock);
6bd52143 2062 cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
7bc570c8
YH
2063
2064 if (!cache) {
2065 struct sk_buff *skb2;
2066 struct ipv6hdr *iph;
2067 struct net_device *dev;
2068 int vif;
2069
2070 if (nowait) {
2071 read_unlock(&mrt_lock);
2072 return -EAGAIN;
2073 }
2074
2075 dev = skb->dev;
6bd52143 2076 if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
7bc570c8
YH
2077 read_unlock(&mrt_lock);
2078 return -ENODEV;
2079 }
2080
2081 /* really correct? */
2082 skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2083 if (!skb2) {
2084 read_unlock(&mrt_lock);
2085 return -ENOMEM;
2086 }
2087
2088 skb_reset_transport_header(skb2);
2089
2090 skb_put(skb2, sizeof(struct ipv6hdr));
2091 skb_reset_network_header(skb2);
2092
2093 iph = ipv6_hdr(skb2);
2094 iph->version = 0;
2095 iph->priority = 0;
2096 iph->flow_lbl[0] = 0;
2097 iph->flow_lbl[1] = 0;
2098 iph->flow_lbl[2] = 0;
2099 iph->payload_len = 0;
2100 iph->nexthdr = IPPROTO_NONE;
2101 iph->hop_limit = 0;
2102 ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
2103 ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);
2104
6bd52143 2105 err = ip6mr_cache_unresolved(mrt, vif, skb2);
7bc570c8
YH
2106 read_unlock(&mrt_lock);
2107
2108 return err;
2109 }
2110
2111 if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
2112 cache->mfc_flags |= MFC_NOTIFY;
2113
5b285cac 2114 err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
7bc570c8
YH
2115 read_unlock(&mrt_lock);
2116 return err;
2117}
2118
5b285cac
PM
2119static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2120 u32 pid, u32 seq, struct mfc6_cache *c)
2121{
2122 struct nlmsghdr *nlh;
2123 struct rtmsg *rtm;
2124
2125 nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
2126 if (nlh == NULL)
2127 return -EMSGSIZE;
2128
2129 rtm = nlmsg_data(nlh);
2130 rtm->rtm_family = RTNL_FAMILY_IPMR;
2131 rtm->rtm_dst_len = 128;
2132 rtm->rtm_src_len = 128;
2133 rtm->rtm_tos = 0;
2134 rtm->rtm_table = mrt->id;
2135 NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
2136 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2137 rtm->rtm_protocol = RTPROT_UNSPEC;
2138 rtm->rtm_flags = 0;
2139
2140 NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin);
2141 NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp);
2142
2143 if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0)
2144 goto nla_put_failure;
2145
2146 return nlmsg_end(skb, nlh);
2147
2148nla_put_failure:
2149 nlmsg_cancel(skb, nlh);
2150 return -EMSGSIZE;
2151}
2152
2153static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2154{
2155 struct net *net = sock_net(skb->sk);
2156 struct mr6_table *mrt;
2157 struct mfc6_cache *mfc;
2158 unsigned int t = 0, s_t;
2159 unsigned int h = 0, s_h;
2160 unsigned int e = 0, s_e;
2161
2162 s_t = cb->args[0];
2163 s_h = cb->args[1];
2164 s_e = cb->args[2];
2165
2166 read_lock(&mrt_lock);
2167 ip6mr_for_each_table(mrt, net) {
2168 if (t < s_t)
2169 goto next_table;
2170 if (t > s_t)
2171 s_h = 0;
2172 for (h = s_h; h < MFC6_LINES; h++) {
2173 list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
2174 if (e < s_e)
2175 goto next_entry;
2176 if (ip6mr_fill_mroute(mrt, skb,
2177 NETLINK_CB(cb->skb).pid,
2178 cb->nlh->nlmsg_seq,
2179 mfc) < 0)
2180 goto done;
2181next_entry:
2182 e++;
2183 }
2184 e = s_e = 0;
2185 }
2186 s_h = 0;
2187next_table:
2188 t++;
2189 }
2190done:
2191 read_unlock(&mrt_lock);
2192
2193 cb->args[2] = e;
2194 cb->args[1] = h;
2195 cb->args[0] = t;
2196
2197 return skb->len;
2198}