/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#endif

#include "br_private.h"

static void br_multicast_start_querier(struct net_bridge *br);
unsigned int br_mdb_rehash_seq;
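
/* Two br_ip addresses are equal only if the protocol, the VLAN id and
 * the address itself all match.
 */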
37
38 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
39 {
40 if (a->proto != b->proto)
41 return 0;
42 if (a->vid != b->vid)
43 return 0;
44 switch (a->proto) {
45 case htons(ETH_P_IP):
46 return a->u.ip4 == b->u.ip4;
47 #if IS_ENABLED(CONFIG_IPV6)
48 case htons(ETH_P_IPV6):
49 return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
50 #endif
51 }
52 return 0;
53 }
54
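
/* mdb->max is always a power of two, so masking the jhash result with
 * (mdb->max - 1) folds it into the table's bucket range.
 */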
static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
				__u16 vid)
{
	return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
				const struct in6_addr *ip,
				__u16 vid)
{
	return jhash_2words(ipv6_addr_hash(ip), vid,
			    mdb->secret) & (mdb->max - 1);
}
#endif

static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
			     struct br_ip *ip)
{
	switch (ip->proto) {
	case htons(ETH_P_IP):
		return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
#endif
	}
	return 0;
}

static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;

	hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}

struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
					   struct br_ip *dst)
{
	if (!mdb)
		return NULL;

	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}

static struct net_bridge_mdb_entry *br_mdb_ip4_get(
	struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
	__u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}
#endif
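
/* Look up the forwarding entry for the destination group of a bridged
 * packet.  Returns NULL when snooping is disabled or when the packet is
 * itself an IGMP/MLD message, so such packets fall back to flooding.
 */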
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}
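
/* The hash table is resized RCU-style: every entry carries two list
 * nodes (hlist[0]/hlist[1]) so it can be linked into the old and the
 * new table at the same time, and the old table is only freed after an
 * RCU grace period.
 */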
static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
		container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *old = mdb->old;

	mdb->old = NULL;
	kfree(old->mhash);
	kfree(old);
}

static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}

void br_multicast_free_pg(struct rcu_head *head)
{
	struct net_bridge_port_group *p =
		container_of(head, struct net_bridge_port_group, rcu);

	kfree(p);
}

static void br_multicast_free_group(struct rcu_head *head)
{
	struct net_bridge_mdb_entry *mp =
		container_of(head, struct net_bridge_mdb_entry, rcu);

	kfree(mp);
}
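
/* Group timer callback: the membership has expired.  Clear the
 * host-joined flag and remove the entry from the hash table once no
 * port is subscribed to the group any more.
 */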
static void br_multicast_group_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->mglist = false;

	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	WARN_ON(1);
}

static void br_multicast_port_group_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}
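
/* Grow or re-seed the hash table.  A new table is allocated and the
 * entries copied across under the multicast lock, while the old table
 * is released via call_rcu_bh() so lockless readers can keep walking it.
 */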
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	br_mdb_rehash_seq++;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}
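
/* Build a self-generated IGMP membership query: a general query when
 * group is 0, otherwise a group-specific query.  The packet carries
 * the Router Alert IP option and is addressed to 224.0.0.1.
 */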
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group)
{
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 sizeof(*ih) + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	memcpy(eth->h_source, br->dev->dev_addr, 6);
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	ih = igmp_hdr(skb);
	ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
	ih->code = (group ? br->multicast_last_member_interval :
			    br->multicast_query_response_interval) /
		   (HZ / IGMP_TIMER_SCALE);
	ih->group = group;
	ih->csum = 0;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
	skb_put(skb, sizeof(*ih));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *group)
{
	struct sk_buff *skb;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct ethhdr *eth;
	u8 *hopopt;
	unsigned long interval;

	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + sizeof(*mldq));
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	memcpy(eth->h_source, br->dev->dev_addr, 6);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + sizeof(*mldq));
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		return NULL;
	}
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	mldq = (struct mld_msg *) icmp6_hdr(skb);

	interval = ipv6_addr_any(group) ?
		   br->multicast_query_response_interval :
		   br->multicast_last_member_interval;

	mldq->mld_type = ICMPV6_MGM_QUERY;
	mldq->mld_code = 0;
	mldq->mld_cksum = 0;
	mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
	mldq->mld_reserved = 0;
	mldq->mld_mca = *group;

	/* checksum */
	mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  sizeof(*mldq), IPPROTO_ICMPV6,
					  csum_partial(mldq,
						       sizeof(*mldq), 0));
	skb_put(skb, sizeof(*mldq));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct br_ip *addr)
{
	switch (addr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_alloc_query(br, addr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
#endif
	}
	return NULL;
}
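
/* Look up a group in the current table.  If the bucket chain walked is
 * longer than the configured hash_elasticity, or the table is full, a
 * rehash/grow is triggered and -EAGAIN tells the caller to retry the
 * lookup; unrecoverable failures disable snooping altogether.
 */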
static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	unsigned int count = 0;
	unsigned int max;
	int elasticity;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max > br->hash_max)) {
			br_warn(br, "Multicast hash table maximum of %d "
				"reached, disabling snooping: %s\n",
				br->hash_max,
				port ? port->dev->name : br->dev->name);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		if (mdb->old) {
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}

struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
	struct net_bridge_port *port, struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, port, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		break;

	case -EAGAIN:
rehash:
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}

struct net_bridge_port_group *br_multicast_new_port_group(
	struct net_bridge_port *port,
	struct br_ip *group,
	struct net_bridge_port_group __rcu *next,
	unsigned char state)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->state = state;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);
	return p;
}
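
/* Record a membership report.  A NULL port means the bridge device
 * itself joined the group (mp->mglist); otherwise the port is inserted
 * into the group's port list, which is kept ordered by pointer value.
 * Either way the membership timer is refreshed.
 */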
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		mp->mglist = true;
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}

static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid)
{
	struct br_ip br_group;

	if (!ipv6_is_transient_multicast(group))
		return 0;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group);
}
#endif

static void br_multicast_router_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router != 1 ||
	    timer_pending(&port->multicast_router_timer) ||
	    hlist_unhashed(&port->rlist))
		goto out;

	hlist_del_init_rcu(&port->rlist);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_local_router_expired(unsigned long data)
{
}

static void br_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || br->multicast_disabled)
		goto out;

	br_multicast_start_querier(br);

out:
	spin_unlock(&br->multicast_lock);
}

static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;

	skb = br_multicast_alloc_query(br, ip);
	if (!skb)
		return;

	if (port) {
		__skb_push(skb, sizeof(struct ethhdr));
		skb->dev = port->dev;
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
			dev_queue_xmit);
	} else
		netif_rx(skb);
}

static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port, u32 sent)
{
	unsigned long time;
	struct br_ip br_group;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier ||
	    timer_pending(&br->multicast_querier_timer))
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	br_group.proto = htons(ETH_P_IP);
	__br_multicast_send_query(br, port, &br_group);

#if IS_ENABLED(CONFIG_IPV6)
	br_group.proto = htons(ETH_P_IPV6);
	__br_multicast_send_query(br, port, &br_group);
#endif

	time = jiffies;
	time += sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(port ? &port->multicast_query_timer :
			 &br->multicast_query_timer, time);
}

static void br_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (port->multicast_startup_queries_sent <
	    br->multicast_startup_query_count)
		port->multicast_startup_queries_sent++;

	br_multicast_send_query(port->br, port,
				port->multicast_startup_queries_sent);

out:
	spin_unlock(&br->multicast_lock);
}

void br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = 1;

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->multicast_query_timer,
		    br_multicast_port_query_expired, (unsigned long)port);
}

void br_multicast_del_port(struct net_bridge_port *port)
{
	del_timer_sync(&port->multicast_router_timer);
}

static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	port->multicast_startup_queries_sent = 0;

	if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
	    del_timer(&port->multicast_query_timer))
		mod_timer(&port->multicast_query_timer, jiffies);
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (br->multicast_disabled || !netif_running(br->dev))
		goto out;

	__br_multicast_enable_port(port);

out:
	spin_unlock(&br->multicast_lock);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);

	if (!hlist_unhashed(&port->rlist))
		hlist_del_init_rcu(&port->rlist);
	del_timer(&port->multicast_router_timer);
	del_timer(&port->multicast_query_timer);
	spin_unlock(&br->multicast_lock);
}

static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb)
{
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;
	u16 vid = 0;

	if (!pskb_may_pull(skb, sizeof(*ih)))
		return -EINVAL;

	br_vlan_get_tag(skb, &vid);
	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		err = br_ip4_multicast_add_group(br, port, group, vid);
		if (err)
			break;
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb)
{
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	int i;
	int len;
	int num;
	int err = 0;
	u16 vid = 0;

	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	br_vlan_get_tag(skb, &vid);
	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *nsrcs, _nsrcs;

		nsrcs = skb_header_pointer(skb,
					   len + offsetof(struct mld2_grec,
							  grec_nsrcs),
					   sizeof(_nsrcs), &_nsrcs);
		if (!nsrcs)
			return -EINVAL;

		if (!pskb_may_pull(skb,
				   len + sizeof(*grec) +
				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += sizeof(*grec) +
		       sizeof(struct in6_addr) * ntohs(*nsrcs);

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
						 vid);
		if (err)
			break;
	}

	return err;
}
#endif

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_after_rcu(slot, &port->rlist);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
}

static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == 1)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	if (port->multicast_router != 1)
		return;

	if (!hlist_unhashed(&port->rlist))
		goto timer;

	br_multicast_add_router(br, port);

timer:
	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}
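
/* A query with a non-zero (non-unspecified) source address means some
 * other querier is active, so (re)arm the "other querier present"
 * timer; in either case the receiving port leads towards a multicast
 * router and is marked as such.
 */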
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					int saddr)
{
	if (saddr)
		mod_timer(&br->multicast_querier_timer,
			  jiffies + br->multicast_querier_interval);
	else if (timer_pending(&br->multicast_querier_timer))
		return;

	br_multicast_mark_router(br, port);
}

static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;
	int err = 0;
	u16 vid = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	br_multicast_query_received(br, port, !!iph->saddr);

	group = ih->group;

	if (skb->len == sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else {
		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
			err = -EINVAL;
			goto out;
		}

		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	}

	if (!group)
		goto out;

	br_vlan_get_tag(skb, &vid);
	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	unsigned long max_delay;
	unsigned long now = jiffies;
	const struct in6_addr *group = NULL;
	int err = 0;
	u16 vid = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));

	/* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
	if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
		err = -EINVAL;
		goto out;
	}

	if (skb->len == sizeof(*mld)) {
		if (!pskb_may_pull(skb, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL);
	}

	if (!group)
		goto out;

	br_vlan_get_tag(skb, &vid);
	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif
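
/* Handle a leave message: lower the remaining lifetime of the matching
 * membership to last_member_count * last_member_interval so it expires
 * quickly unless a new report arrives; with fast-leave the port entry
 * is removed immediately.
 */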
static void br_multicast_leave_group(struct net_bridge *br,
				     struct net_bridge_port *port,
				     struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED) ||
	    timer_pending(&br->multicast_querier_timer))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (p->port != port)
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB);

			if (!mp->ports && !mp->mglist &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid)
{
	struct br_ip br_group;

	if (!ipv6_is_transient_multicast(group))
		return;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group);
}
#endif
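
/* Validate and parse an incoming IPv4 packet.  The IP header and
 * checksum are checked first; non-IGMP multicast is merely flagged for
 * router-ports-only forwarding, while IGMP messages are trimmed to
 * tot_len (on a clone when the skb carries trailing padding) and
 * dispatched by IGMP type.
 */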
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb)
{
	struct sk_buff *skb2 = skb;
	const struct iphdr *iph;
	struct igmphdr *ih;
	unsigned int len;
	unsigned int offset;
	int err;
	u16 vid = 0;

	/* We treat OOM as packet loss for now. */
	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -EINVAL;

	iph = ip_hdr(skb);

	if (iph->ihl < 5 || iph->version != 4)
		return -EINVAL;

	if (!pskb_may_pull(skb, ip_hdrlen(skb)))
		return -EINVAL;

	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		return -EINVAL;

	if (iph->protocol != IPPROTO_IGMP) {
		if (!ipv4_is_local_multicast(iph->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	}

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < ip_hdrlen(skb))
		return -EINVAL;

	if (skb->len > len) {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			return -ENOMEM;

		err = pskb_trim_rcsum(skb2, len);
		if (err)
			goto err_out;
	}

	len -= ip_hdrlen(skb2);
	offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
	__skb_pull(skb2, offset);
	skb_reset_transport_header(skb2);

	err = -EINVAL;
	if (!pskb_may_pull(skb2, sizeof(*ih)))
		goto out;

	switch (skb2->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_fold(skb2->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb2->csum = 0;
		if (skb_checksum_complete(skb2))
			goto out;
	}

	err = 0;

	br_vlan_get_tag(skb2, &vid);
	BR_INPUT_SKB_CB(skb)->igmp = 1;
	ih = igmp_hdr(skb2);

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb2);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb2);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid);
		break;
	}

out:
	__skb_push(skb2, offset);
err_out:
	if (skb2 != skb)
		kfree_skb(skb2);
	return err;
}
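
/* The IPv6 counterpart: skip extension headers to locate the ICMPv6
 * header, verify the MLD checksum on a clone, then dispatch on the
 * ICMPv6 message type.
 */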
#if IS_ENABLED(CONFIG_IPV6)
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb)
{
	struct sk_buff *skb2;
	const struct ipv6hdr *ip6h;
	u8 icmp6_type;
	u8 nexthdr;
	__be16 frag_off;
	unsigned int len;
	int offset;
	int err;
	u16 vid = 0;

	if (!pskb_may_pull(skb, sizeof(*ip6h)))
		return -EINVAL;

	ip6h = ipv6_hdr(skb);

	/*
	 * We're interested in MLD messages only.
	 *  - Version is 6
	 *  - MLD always carries the Router Alert hop-by-hop option
	 *  - But we do not support jumbograms.
	 */
	if (ip6h->version != 6 ||
	    ip6h->nexthdr != IPPROTO_HOPOPTS ||
	    ip6h->payload_len == 0)
		return 0;

	len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
	if (skb->len < len)
		return -EINVAL;

	nexthdr = ip6h->nexthdr;
	offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off);

	if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
		return 0;

	/* Okay, we found the ICMPv6 header */
	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return -ENOMEM;

	err = -EINVAL;
	if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr)))
		goto out;

	len -= offset - skb_network_offset(skb2);

	__skb_pull(skb2, offset);
	skb_reset_transport_header(skb2);
	skb_postpull_rcsum(skb2, skb_network_header(skb2),
			   skb_network_header_len(skb2));

	icmp6_type = icmp6_hdr(skb2)->icmp6_type;

	switch (icmp6_type) {
	case ICMPV6_MGM_QUERY:
	case ICMPV6_MGM_REPORT:
	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_MLD2_REPORT:
		break;
	default:
		err = 0;
		goto out;
	}

	/* Okay, we found an MLD message. Check further. */
	if (skb2->len > len) {
		err = pskb_trim_rcsum(skb2, len);
		if (err)
			goto out;
		err = -EINVAL;
	}

	ip6h = ipv6_hdr(skb2);

	switch (skb2->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
				     IPPROTO_ICMPV6, skb2->csum))
			break;
		/*FALLTHROUGH*/
	case CHECKSUM_NONE:
		skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
							  &ip6h->daddr,
							  skb2->len,
							  IPPROTO_ICMPV6, 0));
		if (__skb_checksum_complete(skb2))
			goto out;
	}

	err = 0;

	br_vlan_get_tag(skb, &vid);
	BR_INPUT_SKB_CB(skb)->igmp = 1;

	switch (icmp6_type) {
	case ICMPV6_MGM_REPORT:
	    {
		struct mld_msg *mld;
		if (!pskb_may_pull(skb2, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *)skb_transport_header(skb2);
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
		break;
	    }
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb2);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb2);
		break;
	case ICMPV6_MGM_REDUCTION:
	    {
		struct mld_msg *mld;
		if (!pskb_may_pull(skb2, sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *)skb_transport_header(skb2);
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
	    }
	}

out:
	kfree_skb(skb2);
	return err;
}
#endif
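
/* Entry point from the bridge input path: dispatch snooping by the
 * skb's link-layer protocol.
 */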
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb)
{
	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (br->multicast_disabled)
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return br_multicast_ipv4_rcv(br, port, skb);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_multicast_ipv6_rcv(br, port, skb);
#endif
	}

	return 0;
}

static void br_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	spin_lock(&br->multicast_lock);
	if (br->multicast_startup_queries_sent <
	    br->multicast_startup_query_count)
		br->multicast_startup_queries_sent++;

	br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);

	spin_unlock(&br->multicast_lock);
}
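
/* Defaults follow the values suggested by IGMPv2/v3 (RFC 2236/3376):
 * a 125 s query interval, robustness (last member/startup counts) of
 * 2, and a 260 s membership interval (2 * 125 s plus the 10 s query
 * response interval).
 */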
void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = 1;
	br->multicast_querier = 0;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->multicast_querier_timer,
		    br_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
		    (unsigned long)br);
}

void br_multicast_open(struct net_bridge *br)
{
	br->multicast_startup_queries_sent = 0;

	if (br->multicast_disabled)
		return;

	mod_timer(&br->multicast_query_timer, jiffies);
}

void br_multicast_stop(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *n;
	u32 ver;
	int i;

	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->multicast_querier_timer);
	del_timer_sync(&br->multicast_query_timer);

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);
}
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -ENOENT;

	spin_lock_bh(&br->multicast_lock);
	if (!netif_running(br->dev))
		goto unlock;

	switch (val) {
	case 0:
	case 2:
		del_timer(&br->multicast_router_timer);
		/* fall through */
	case 1:
		br->multicast_router = val;
		err = 0;
		break;

	default:
		err = -EINVAL;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	int err = -ENOENT;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
		goto unlock;

	switch (val) {
	case 0:
	case 1:
	case 2:
		p->multicast_router = val;
		err = 0;

		if (val < 2 && !hlist_unhashed(&p->rlist))
			hlist_del_init_rcu(&p->rlist);

		if (val == 1)
			break;

		del_timer(&p->multicast_router_timer);

		if (val == 0)
			break;

		br_multicast_add_router(br, p);
		break;

	default:
		err = -EINVAL;
		break;
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}

static void br_multicast_start_querier(struct net_bridge *br)
{
	struct net_bridge_port *port;

	br_multicast_open(br);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		__br_multicast_enable_port(port);
	}
}
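
/* Toggle snooping at run time.  Re-enabling rehashes the table (so a
 * still-pending resize fails the toggle with -EEXIST and rolls the
 * setting back) and restarts the querier state machine on the bridge
 * and all active ports.
 */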
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	int err = 0;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_disabled == !val)
		goto unlock;

	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_start_querier(br);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}

int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br->multicast_querier == val)
		goto unlock;

	br->multicast_querier = val;
	if (val)
		br_multicast_start_querier(br);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}

int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -ENOENT;
	u32 old;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (!netif_running(br->dev))
		goto unlock;

	err = -EINVAL;
	if (!is_power_of_2(val))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb && val < mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (mdb) {
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}