bridge: prevent flooding IPv6 packets that do not have a listener
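In the revision shown below, br_multicast_ipv6_rcv() marks IPv6 packets whose destination is a transient multicast address with BR_INPUT_SKB_CB(skb)->mrouters_only, so the bridge input path forwards them only toward detected multicast routers and registered group members instead of flooding every port.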
[deliverable/linux.git] net/bridge/br_multicast.c
eb1d1641
HX
1/*
2 * Bridge multicast support.
3 *
4 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#include <linux/err.h>
14#include <linux/if_ether.h>
15#include <linux/igmp.h>
16#include <linux/jhash.h>
17#include <linux/kernel.h>
b195167f 18#include <linux/log2.h>
eb1d1641
HX
19#include <linux/netdevice.h>
20#include <linux/netfilter_bridge.h>
21#include <linux/random.h>
22#include <linux/rculist.h>
23#include <linux/skbuff.h>
24#include <linux/slab.h>
25#include <linux/timer.h>
1c8ad5bf 26#include <linux/inetdevice.h>
eb1d1641 27#include <net/ip.h>
dfd56b8b 28#if IS_ENABLED(CONFIG_IPV6)
08b202b6
YH
29#include <net/ipv6.h>
30#include <net/mld.h>
d4c4f07d 31#include <net/ip6_checksum.h>
08b202b6 32#endif
eb1d1641
HX
33
34#include "br_private.h"
35
c83b8fab 36static void br_multicast_start_querier(struct net_bridge *br);
2ce297fc 37unsigned int br_mdb_rehash_seq;
c83b8fab 38
8ef2a9a5
YH
39static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
40{
41 if (a->proto != b->proto)
42 return 0;
b0e9a30d
VY
43 if (a->vid != b->vid)
44 return 0;
8ef2a9a5
YH
45 switch (a->proto) {
46 case htons(ETH_P_IP):
47 return a->u.ip4 == b->u.ip4;
dfd56b8b 48#if IS_ENABLED(CONFIG_IPV6)
08b202b6
YH
49 case htons(ETH_P_IPV6):
50 return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
51#endif
8ef2a9a5
YH
52 }
53 return 0;
54}
55
b0e9a30d
VY
56static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
57 __u16 vid)
eb1d1641 58{
b0e9a30d 59 return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
eb1d1641
HX
60}
61
dfd56b8b 62#if IS_ENABLED(CONFIG_IPV6)
08b202b6 63static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
b0e9a30d
VY
64 const struct in6_addr *ip,
65 __u16 vid)
08b202b6 66{
b0e9a30d
VY
67 return jhash_2words(ipv6_addr_hash(ip), vid,
68 mdb->secret) & (mdb->max - 1);
08b202b6
YH
69}
70#endif
71
8ef2a9a5
YH
72static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
73 struct br_ip *ip)
74{
75 switch (ip->proto) {
76 case htons(ETH_P_IP):
b0e9a30d 77 return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
dfd56b8b 78#if IS_ENABLED(CONFIG_IPV6)
08b202b6 79 case htons(ETH_P_IPV6):
b0e9a30d 80 return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
08b202b6 81#endif
8ef2a9a5
YH
82 }
83 return 0;
eb1d1641
HX
84}
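The bucket index above is the secret-keyed hash masked with mdb->max - 1, which only spreads entries evenly when max is a power of two (br_multicast_set_hash_max() further down enforces that with is_power_of_2()). Below is a minimal, self-contained userspace sketch of the same shape; mix32() is a stand-in for jhash_2words() and all values are demo assumptions, not part of this file.

#include <stdint.h>
#include <stdio.h>

static uint32_t mix32(uint32_t a, uint32_t b, uint32_t secret)
{
	/* Stand-in for jhash_2words(); any reasonable 32-bit mixer works here. */
	uint32_t h = a ^ (b * 0x9e3779b9u) ^ secret;

	h ^= h >> 16;
	h *= 0x7feb352du;
	h ^= h >> 15;
	return h;
}

int main(void)
{
	const uint32_t max = 512;		/* table size, must be a power of two */
	const uint32_t secret = 0x12345678;	/* per-table random secret */
	const uint32_t ip = 0xe0000101;		/* 224.0.1.1, demo value only */
	const uint16_t vid = 10;

	/* Same shape as __br_ip4_hash(): hash & (max - 1) selects the bucket. */
	printf("bucket = %u\n", mix32(ip, vid, secret) & (max - 1));
	return 0;
}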
85
86static struct net_bridge_mdb_entry *__br_mdb_ip_get(
8ef2a9a5 87 struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
eb1d1641
HX
88{
89 struct net_bridge_mdb_entry *mp;
eb1d1641 90
b67bfe0d 91 hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
8ef2a9a5 92 if (br_ip_equal(&mp->addr, dst))
eb1d1641
HX
93 return mp;
94 }
95
96 return NULL;
97}
98
cfd56754
CW
99struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
100 struct br_ip *dst)
7f285fa7
HX
101{
102 if (!mdb)
103 return NULL;
104
105 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
106}
107
8ef2a9a5 108static struct net_bridge_mdb_entry *br_mdb_ip4_get(
b0e9a30d 109 struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
eb1d1641 110{
8ef2a9a5
YH
111 struct br_ip br_dst;
112
113 br_dst.u.ip4 = dst;
114 br_dst.proto = htons(ETH_P_IP);
b0e9a30d 115 br_dst.vid = vid;
0821ec55 116
7f285fa7 117 return br_mdb_ip_get(mdb, &br_dst);
8ef2a9a5
YH
118}
119
dfd56b8b 120#if IS_ENABLED(CONFIG_IPV6)
08b202b6 121static struct net_bridge_mdb_entry *br_mdb_ip6_get(
b0e9a30d
VY
122 struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
123 __u16 vid)
08b202b6
YH
124{
125 struct br_ip br_dst;
0821ec55 126
4e3fd7a0 127 br_dst.u.ip6 = *dst;
08b202b6 128 br_dst.proto = htons(ETH_P_IPV6);
b0e9a30d 129 br_dst.vid = vid;
08b202b6 130
7f285fa7 131 return br_mdb_ip_get(mdb, &br_dst);
08b202b6
YH
132}
133#endif
134
eb1d1641 135struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
fbca58a2 136 struct sk_buff *skb, u16 vid)
eb1d1641 137{
e8051688 138 struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
8ef2a9a5
YH
139 struct br_ip ip;
140
7f285fa7 141 if (br->multicast_disabled)
eb1d1641
HX
142 return NULL;
143
8ef2a9a5 144 if (BR_INPUT_SKB_CB(skb)->igmp)
eb1d1641
HX
145 return NULL;
146
8ef2a9a5 147 ip.proto = skb->protocol;
fbca58a2 148 ip.vid = vid;
8ef2a9a5 149
eb1d1641
HX
150 switch (skb->protocol) {
151 case htons(ETH_P_IP):
8ef2a9a5
YH
152 ip.u.ip4 = ip_hdr(skb)->daddr;
153 break;
dfd56b8b 154#if IS_ENABLED(CONFIG_IPV6)
08b202b6 155 case htons(ETH_P_IPV6):
4e3fd7a0 156 ip.u.ip6 = ipv6_hdr(skb)->daddr;
08b202b6
YH
157 break;
158#endif
8ef2a9a5
YH
159 default:
160 return NULL;
eb1d1641
HX
161 }
162
8ef2a9a5 163 return br_mdb_ip_get(mdb, &ip);
eb1d1641
HX
164}
165
166static void br_mdb_free(struct rcu_head *head)
167{
168 struct net_bridge_mdb_htable *mdb =
169 container_of(head, struct net_bridge_mdb_htable, rcu);
170 struct net_bridge_mdb_htable *old = mdb->old;
171
172 mdb->old = NULL;
173 kfree(old->mhash);
174 kfree(old);
175}
176
177static int br_mdb_copy(struct net_bridge_mdb_htable *new,
178 struct net_bridge_mdb_htable *old,
179 int elasticity)
180{
181 struct net_bridge_mdb_entry *mp;
eb1d1641
HX
182 int maxlen;
183 int len;
184 int i;
185
186 for (i = 0; i < old->max; i++)
b67bfe0d 187 hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
eb1d1641 188 hlist_add_head(&mp->hlist[new->ver],
8ef2a9a5 189 &new->mhash[br_ip_hash(new, &mp->addr)]);
eb1d1641
HX
190
191 if (!elasticity)
192 return 0;
193
194 maxlen = 0;
195 for (i = 0; i < new->max; i++) {
196 len = 0;
b67bfe0d 197 hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
eb1d1641
HX
198 len++;
199 if (len > maxlen)
200 maxlen = len;
201 }
202
203 return maxlen > elasticity ? -EINVAL : 0;
204}
205
cfd56754 206void br_multicast_free_pg(struct rcu_head *head)
eb1d1641
HX
207{
208 struct net_bridge_port_group *p =
209 container_of(head, struct net_bridge_port_group, rcu);
210
211 kfree(p);
212}
213
214static void br_multicast_free_group(struct rcu_head *head)
215{
216 struct net_bridge_mdb_entry *mp =
217 container_of(head, struct net_bridge_mdb_entry, rcu);
218
219 kfree(mp);
220}
221
222static void br_multicast_group_expired(unsigned long data)
223{
224 struct net_bridge_mdb_entry *mp = (void *)data;
225 struct net_bridge *br = mp->br;
226 struct net_bridge_mdb_htable *mdb;
227
228 spin_lock(&br->multicast_lock);
229 if (!netif_running(br->dev) || timer_pending(&mp->timer))
230 goto out;
231
8a870178 232 mp->mglist = false;
eb1d1641
HX
233
234 if (mp->ports)
235 goto out;
236
e8051688
ED
237 mdb = mlock_dereference(br->mdb, br);
238
eb1d1641
HX
239 hlist_del_rcu(&mp->hlist[mdb->ver]);
240 mdb->size--;
241
eb1d1641
HX
242 call_rcu_bh(&mp->rcu, br_multicast_free_group);
243
244out:
245 spin_unlock(&br->multicast_lock);
246}
247
248static void br_multicast_del_pg(struct net_bridge *br,
249 struct net_bridge_port_group *pg)
250{
e8051688 251 struct net_bridge_mdb_htable *mdb;
eb1d1641
HX
252 struct net_bridge_mdb_entry *mp;
253 struct net_bridge_port_group *p;
e8051688
ED
254 struct net_bridge_port_group __rcu **pp;
255
256 mdb = mlock_dereference(br->mdb, br);
eb1d1641 257
8ef2a9a5 258 mp = br_mdb_ip_get(mdb, &pg->addr);
eb1d1641
HX
259 if (WARN_ON(!mp))
260 return;
261
e8051688
ED
262 for (pp = &mp->ports;
263 (p = mlock_dereference(*pp, br)) != NULL;
264 pp = &p->next) {
eb1d1641
HX
265 if (p != pg)
266 continue;
267
83f6a740 268 rcu_assign_pointer(*pp, p->next);
eb1d1641
HX
269 hlist_del_init(&p->mglist);
270 del_timer(&p->timer);
eb1d1641
HX
271 call_rcu_bh(&p->rcu, br_multicast_free_pg);
272
c7e8e8a8 273 if (!mp->ports && !mp->mglist && mp->timer_armed &&
eb1d1641
HX
274 netif_running(br->dev))
275 mod_timer(&mp->timer, jiffies);
276
277 return;
278 }
279
280 WARN_ON(1);
281}
282
283static void br_multicast_port_group_expired(unsigned long data)
284{
285 struct net_bridge_port_group *pg = (void *)data;
286 struct net_bridge *br = pg->port->br;
287
288 spin_lock(&br->multicast_lock);
289 if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
ccb1c31a 290 hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT)
eb1d1641
HX
291 goto out;
292
293 br_multicast_del_pg(br, pg);
294
295out:
296 spin_unlock(&br->multicast_lock);
297}
298
e8051688 299static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
eb1d1641
HX
300 int elasticity)
301{
e8051688 302 struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
eb1d1641
HX
303 struct net_bridge_mdb_htable *mdb;
304 int err;
305
306 mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
307 if (!mdb)
308 return -ENOMEM;
309
310 mdb->max = max;
311 mdb->old = old;
312
313 mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
314 if (!mdb->mhash) {
315 kfree(mdb);
316 return -ENOMEM;
317 }
318
319 mdb->size = old ? old->size : 0;
320 mdb->ver = old ? old->ver ^ 1 : 0;
321
322 if (!old || elasticity)
323 get_random_bytes(&mdb->secret, sizeof(mdb->secret));
324 else
325 mdb->secret = old->secret;
326
327 if (!old)
328 goto out;
329
330 err = br_mdb_copy(mdb, old, elasticity);
331 if (err) {
332 kfree(mdb->mhash);
333 kfree(mdb);
334 return err;
335 }
336
2ce297fc 337 br_mdb_rehash_seq++;
eb1d1641
HX
338 call_rcu_bh(&mdb->rcu, br_mdb_free);
339
340out:
341 rcu_assign_pointer(*mdbp, mdb);
342
343 return 0;
344}
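A note on the scheme above: each net_bridge_mdb_entry carries two hlist_node slots, hlist[0] and hlist[1], and mdb->ver flips between them (old->ver ^ 1). That lets br_mdb_copy() link every entry into the new table through one slot while RCU readers keep walking the old table through the other, and the old table is only reclaimed after a grace period via call_rcu_bh() and br_mdb_free().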
345
8ef2a9a5
YH
346static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
347 __be32 group)
eb1d1641
HX
348{
349 struct sk_buff *skb;
350 struct igmphdr *ih;
351 struct ethhdr *eth;
352 struct iphdr *iph;
353
354 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
355 sizeof(*ih) + 4);
356 if (!skb)
357 goto out;
358
359 skb->protocol = htons(ETH_P_IP);
360
361 skb_reset_mac_header(skb);
362 eth = eth_hdr(skb);
363
364 memcpy(eth->h_source, br->dev->dev_addr, 6);
365 eth->h_dest[0] = 1;
366 eth->h_dest[1] = 0;
367 eth->h_dest[2] = 0x5e;
368 eth->h_dest[3] = 0;
369 eth->h_dest[4] = 0;
370 eth->h_dest[5] = 1;
371 eth->h_proto = htons(ETH_P_IP);
372 skb_put(skb, sizeof(*eth));
373
374 skb_set_network_header(skb, skb->len);
375 iph = ip_hdr(skb);
376
377 iph->version = 4;
378 iph->ihl = 6;
379 iph->tos = 0xc0;
380 iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
381 iph->id = 0;
382 iph->frag_off = htons(IP_DF);
383 iph->ttl = 1;
384 iph->protocol = IPPROTO_IGMP;
1c8ad5bf
CW
385 iph->saddr = br->multicast_query_use_ifaddr ?
386 inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
eb1d1641
HX
387 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
388 ((u8 *)&iph[1])[0] = IPOPT_RA;
389 ((u8 *)&iph[1])[1] = 4;
390 ((u8 *)&iph[1])[2] = 0;
391 ((u8 *)&iph[1])[3] = 0;
392 ip_send_check(iph);
393 skb_put(skb, 24);
394
395 skb_set_transport_header(skb, skb->len);
396 ih = igmp_hdr(skb);
397 ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
398 ih->code = (group ? br->multicast_last_member_interval :
399 br->multicast_query_response_interval) /
400 (HZ / IGMP_TIMER_SCALE);
401 ih->group = group;
402 ih->csum = 0;
403 ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
404 skb_put(skb, sizeof(*ih));
405
406 __skb_pull(skb, sizeof(*eth));
407
408out:
409 return skb;
410}
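For reference, the frame built above is a 14-byte Ethernet header, a 24-byte IPv4 header (ihl = 6, i.e. the 20-byte header plus the 4-byte Router Alert option written just after iph, which is what the skb_put(skb, 24) covers) and an 8-byte IGMP query; ih->code carries the maximum response time in units of 1/10 s, hence the division by HZ / IGMP_TIMER_SCALE.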
411
dfd56b8b 412#if IS_ENABLED(CONFIG_IPV6)
08b202b6 413static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
b71d1d42 414 const struct in6_addr *group)
08b202b6
YH
415{
416 struct sk_buff *skb;
417 struct ipv6hdr *ip6h;
418 struct mld_msg *mldq;
419 struct ethhdr *eth;
420 u8 *hopopt;
421 unsigned long interval;
422
423 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
424 8 + sizeof(*mldq));
425 if (!skb)
426 goto out;
427
428 skb->protocol = htons(ETH_P_IPV6);
429
430 /* Ethernet header */
431 skb_reset_mac_header(skb);
432 eth = eth_hdr(skb);
433
434 memcpy(eth->h_source, br->dev->dev_addr, 6);
08b202b6
YH
435 eth->h_proto = htons(ETH_P_IPV6);
436 skb_put(skb, sizeof(*eth));
437
438 /* IPv6 header + HbH option */
439 skb_set_network_header(skb, skb->len);
440 ip6h = ipv6_hdr(skb);
441
442 *(__force __be32 *)ip6h = htonl(0x60000000);
76d66158 443 ip6h->payload_len = htons(8 + sizeof(*mldq));
08b202b6
YH
444 ip6h->nexthdr = IPPROTO_HOPOPTS;
445 ip6h->hop_limit = 1;
a7bff75b 446 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
d1d81d4c
UW
447 if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
448 &ip6h->saddr)) {
449 kfree_skb(skb);
450 return NULL;
451 }
36cff5a1 452 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
08b202b6
YH
453
454 hopopt = (u8 *)(ip6h + 1);
455 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */
456 hopopt[1] = 0; /* length of HbH */
457 hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */
458 hopopt[3] = 2; /* Length of RA Option */
459 hopopt[4] = 0; /* Type = 0x0000 (MLD) */
460 hopopt[5] = 0;
1de5a71c
EZ
461 hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */
462 hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */
08b202b6
YH
463
464 skb_put(skb, sizeof(*ip6h) + 8);
465
466 /* ICMPv6 */
467 skb_set_transport_header(skb, skb->len);
468 mldq = (struct mld_msg *) icmp6_hdr(skb);
469
32de868c
LL
470 interval = ipv6_addr_any(group) ?
471 br->multicast_query_response_interval :
472 br->multicast_last_member_interval;
08b202b6
YH
473
474 mldq->mld_type = ICMPV6_MGM_QUERY;
475 mldq->mld_code = 0;
476 mldq->mld_cksum = 0;
477 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
478 mldq->mld_reserved = 0;
4e3fd7a0 479 mldq->mld_mca = *group;
08b202b6
YH
480
481 /* checksum */
482 mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
483 sizeof(*mldq), IPPROTO_ICMPV6,
484 csum_partial(mldq,
485 sizeof(*mldq), 0));
486 skb_put(skb, sizeof(*mldq));
487
488 __skb_pull(skb, sizeof(*eth));
489
490out:
491 return skb;
492}
493#endif
494
8ef2a9a5
YH
495static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
496 struct br_ip *addr)
497{
498 switch (addr->proto) {
499 case htons(ETH_P_IP):
500 return br_ip4_multicast_alloc_query(br, addr->u.ip4);
dfd56b8b 501#if IS_ENABLED(CONFIG_IPV6)
08b202b6
YH
502 case htons(ETH_P_IPV6):
503 return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
504#endif
8ef2a9a5
YH
505 }
506 return NULL;
507}
508
eb1d1641 509static struct net_bridge_mdb_entry *br_multicast_get_group(
8ef2a9a5
YH
510 struct net_bridge *br, struct net_bridge_port *port,
511 struct br_ip *group, int hash)
eb1d1641 512{
e8051688 513 struct net_bridge_mdb_htable *mdb;
eb1d1641 514 struct net_bridge_mdb_entry *mp;
95c96174
ED
515 unsigned int count = 0;
516 unsigned int max;
eb1d1641
HX
517 int elasticity;
518 int err;
519
e8051688 520 mdb = rcu_dereference_protected(br->mdb, 1);
b67bfe0d 521 hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
eb1d1641 522 count++;
8ef2a9a5 523 if (unlikely(br_ip_equal(group, &mp->addr)))
eb1d1641 524 return mp;
eb1d1641
HX
525 }
526
527 elasticity = 0;
528 max = mdb->max;
529
530 if (unlikely(count > br->hash_elasticity && count)) {
531 if (net_ratelimit())
28a16c97 532 br_info(br, "Multicast hash table "
533 "chain limit reached: %s\n",
534 port ? port->dev->name : br->dev->name);
eb1d1641
HX
535
536 elasticity = br->hash_elasticity;
537 }
538
539 if (mdb->size >= max) {
540 max *= 2;
036be6db
TG
541 if (unlikely(max > br->hash_max)) {
542 br_warn(br, "Multicast hash table maximum of %d "
543 "reached, disabling snooping: %s\n",
544 br->hash_max,
545 port ? port->dev->name : br->dev->name);
eb1d1641
HX
546 err = -E2BIG;
547disable:
548 br->multicast_disabled = 1;
549 goto err;
550 }
551 }
552
553 if (max > mdb->max || elasticity) {
554 if (mdb->old) {
555 if (net_ratelimit())
28a16c97 556 br_info(br, "Multicast hash table "
557 "on fire: %s\n",
558 port ? port->dev->name : br->dev->name);
eb1d1641
HX
559 err = -EEXIST;
560 goto err;
561 }
562
563 err = br_mdb_rehash(&br->mdb, max, elasticity);
564 if (err) {
28a16c97 565 br_warn(br, "Cannot rehash multicast "
566 "hash table, disabling snooping: %s, %d, %d\n",
567 port ? port->dev->name : br->dev->name,
568 mdb->size, err);
eb1d1641
HX
569 goto disable;
570 }
571
572 err = -EAGAIN;
573 goto err;
574 }
575
576 return NULL;
577
578err:
579 mp = ERR_PTR(err);
580 return mp;
581}
582
cfd56754
CW
583struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
584 struct net_bridge_port *port, struct br_ip *group)
eb1d1641 585{
e8051688 586 struct net_bridge_mdb_htable *mdb;
eb1d1641
HX
587 struct net_bridge_mdb_entry *mp;
588 int hash;
4c0833bc 589 int err;
eb1d1641 590
e8051688 591 mdb = rcu_dereference_protected(br->mdb, 1);
eb1d1641 592 if (!mdb) {
4c0833bc
TK
593 err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
594 if (err)
595 return ERR_PTR(err);
eb1d1641
HX
596 goto rehash;
597 }
598
599 hash = br_ip_hash(mdb, group);
600 mp = br_multicast_get_group(br, port, group, hash);
601 switch (PTR_ERR(mp)) {
602 case 0:
603 break;
604
605 case -EAGAIN:
606rehash:
e8051688 607 mdb = rcu_dereference_protected(br->mdb, 1);
eb1d1641
HX
608 hash = br_ip_hash(mdb, group);
609 break;
610
611 default:
612 goto out;
613 }
614
615 mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
616 if (unlikely(!mp))
4c0833bc 617 return ERR_PTR(-ENOMEM);
eb1d1641
HX
618
619 mp->br = br;
8ef2a9a5 620 mp->addr = *group;
eb1d1641 621
1faabf2a
ED
622 setup_timer(&mp->timer, br_multicast_group_expired,
623 (unsigned long)mp);
624
eb1d1641
HX
625 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
626 mdb->size++;
627
628out:
629 return mp;
630}
631
cfd56754
CW
632struct net_bridge_port_group *br_multicast_new_port_group(
633 struct net_bridge_port *port,
634 struct br_ip *group,
ccb1c31a
AW
635 struct net_bridge_port_group __rcu *next,
636 unsigned char state)
cfd56754
CW
637{
638 struct net_bridge_port_group *p;
639
640 p = kzalloc(sizeof(*p), GFP_ATOMIC);
641 if (unlikely(!p))
642 return NULL;
643
644 p->addr = *group;
645 p->port = port;
ccb1c31a 646 p->state = state;
eca2a43b 647 rcu_assign_pointer(p->next, next);
cfd56754
CW
648 hlist_add_head(&p->mglist, &port->mglist);
649 setup_timer(&p->timer, br_multicast_port_group_expired,
650 (unsigned long)p);
651 return p;
652}
653
eb1d1641 654static int br_multicast_add_group(struct net_bridge *br,
8ef2a9a5
YH
655 struct net_bridge_port *port,
656 struct br_ip *group)
eb1d1641
HX
657{
658 struct net_bridge_mdb_entry *mp;
659 struct net_bridge_port_group *p;
e8051688 660 struct net_bridge_port_group __rcu **pp;
eb1d1641
HX
661 int err;
662
eb1d1641
HX
663 spin_lock(&br->multicast_lock);
664 if (!netif_running(br->dev) ||
665 (port && port->state == BR_STATE_DISABLED))
666 goto out;
667
668 mp = br_multicast_new_group(br, port, group);
669 err = PTR_ERR(mp);
4c0833bc 670 if (IS_ERR(mp))
eb1d1641
HX
671 goto err;
672
673 if (!port) {
8a870178 674 mp->mglist = true;
eb1d1641
HX
675 goto out;
676 }
677
e8051688
ED
678 for (pp = &mp->ports;
679 (p = mlock_dereference(*pp, br)) != NULL;
680 pp = &p->next) {
eb1d1641 681 if (p->port == port)
9f00b2e7 682 goto out;
eb1d1641
HX
683 if ((unsigned long)p->port < (unsigned long)port)
684 break;
685 }
686
ccb1c31a 687 p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
eb1d1641
HX
688 if (unlikely(!p))
689 goto err;
eb1d1641 690 rcu_assign_pointer(*pp, p);
37a393bc 691 br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
eb1d1641 692
eb1d1641
HX
693out:
694 err = 0;
695
696err:
697 spin_unlock(&br->multicast_lock);
698 return err;
699}
700
8ef2a9a5
YH
701static int br_ip4_multicast_add_group(struct net_bridge *br,
702 struct net_bridge_port *port,
b0e9a30d
VY
703 __be32 group,
704 __u16 vid)
8ef2a9a5
YH
705{
706 struct br_ip br_group;
707
708 if (ipv4_is_local_multicast(group))
709 return 0;
710
711 br_group.u.ip4 = group;
712 br_group.proto = htons(ETH_P_IP);
b0e9a30d 713 br_group.vid = vid;
8ef2a9a5
YH
714
715 return br_multicast_add_group(br, port, &br_group);
716}
717
dfd56b8b 718#if IS_ENABLED(CONFIG_IPV6)
08b202b6
YH
719static int br_ip6_multicast_add_group(struct net_bridge *br,
720 struct net_bridge_port *port,
b0e9a30d
VY
721 const struct in6_addr *group,
722 __u16 vid)
08b202b6
YH
723{
724 struct br_ip br_group;
725
e4de9f9e 726 if (!ipv6_is_transient_multicast(group))
08b202b6
YH
727 return 0;
728
4e3fd7a0 729 br_group.u.ip6 = *group;
9cc6e0c4 730 br_group.proto = htons(ETH_P_IPV6);
b0e9a30d 731 br_group.vid = vid;
08b202b6
YH
732
733 return br_multicast_add_group(br, port, &br_group);
734}
735#endif
736
eb1d1641
HX
737static void br_multicast_router_expired(unsigned long data)
738{
739 struct net_bridge_port *port = (void *)data;
740 struct net_bridge *br = port->br;
741
742 spin_lock(&br->multicast_lock);
743 if (port->multicast_router != 1 ||
744 timer_pending(&port->multicast_router_timer) ||
745 hlist_unhashed(&port->rlist))
746 goto out;
747
748 hlist_del_init_rcu(&port->rlist);
749
750out:
751 spin_unlock(&br->multicast_lock);
752}
753
754static void br_multicast_local_router_expired(unsigned long data)
755{
756}
757
c83b8fab
HX
758static void br_multicast_querier_expired(unsigned long data)
759{
bb63f1f8 760 struct net_bridge *br = (void *)data;
c83b8fab
HX
761
762 spin_lock(&br->multicast_lock);
763 if (!netif_running(br->dev) || br->multicast_disabled)
764 goto out;
765
766 br_multicast_start_querier(br);
767
768out:
769 spin_unlock(&br->multicast_lock);
770}
771
8ef2a9a5
YH
772static void __br_multicast_send_query(struct net_bridge *br,
773 struct net_bridge_port *port,
774 struct br_ip *ip)
eb1d1641 775{
eb1d1641
HX
776 struct sk_buff *skb;
777
8ef2a9a5 778 skb = br_multicast_alloc_query(br, ip);
eb1d1641 779 if (!skb)
8ef2a9a5 780 return;
eb1d1641
HX
781
782 if (port) {
783 __skb_push(skb, sizeof(struct ethhdr));
784 skb->dev = port->dev;
713aefa3 785 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
eb1d1641
HX
786 dev_queue_xmit);
787 } else
788 netif_rx(skb);
8ef2a9a5
YH
789}
790
791static void br_multicast_send_query(struct net_bridge *br,
792 struct net_bridge_port *port, u32 sent)
793{
794 unsigned long time;
795 struct br_ip br_group;
796
797 if (!netif_running(br->dev) || br->multicast_disabled ||
c5c23260 798 !br->multicast_querier ||
8ef2a9a5
YH
799 timer_pending(&br->multicast_querier_timer))
800 return;
801
08b202b6
YH
802 memset(&br_group.u, 0, sizeof(br_group.u));
803
8ef2a9a5 804 br_group.proto = htons(ETH_P_IP);
08b202b6 805 __br_multicast_send_query(br, port, &br_group);
8ef2a9a5 806
dfd56b8b 807#if IS_ENABLED(CONFIG_IPV6)
08b202b6 808 br_group.proto = htons(ETH_P_IPV6);
8ef2a9a5 809 __br_multicast_send_query(br, port, &br_group);
08b202b6 810#endif
eb1d1641 811
eb1d1641
HX
812 time = jiffies;
813 time += sent < br->multicast_startup_query_count ?
814 br->multicast_startup_query_interval :
815 br->multicast_query_interval;
816 mod_timer(port ? &port->multicast_query_timer :
817 &br->multicast_query_timer, time);
818}
819
820static void br_multicast_port_query_expired(unsigned long data)
821{
822 struct net_bridge_port *port = (void *)data;
823 struct net_bridge *br = port->br;
824
825 spin_lock(&br->multicast_lock);
02a780c0
DC
826 if (port->state == BR_STATE_DISABLED ||
827 port->state == BR_STATE_BLOCKING)
eb1d1641
HX
828 goto out;
829
830 if (port->multicast_startup_queries_sent <
831 br->multicast_startup_query_count)
832 port->multicast_startup_queries_sent++;
833
834 br_multicast_send_query(port->br, port,
835 port->multicast_startup_queries_sent);
836
837out:
838 spin_unlock(&br->multicast_lock);
839}
840
841void br_multicast_add_port(struct net_bridge_port *port)
842{
843 port->multicast_router = 1;
844
845 setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
846 (unsigned long)port);
847 setup_timer(&port->multicast_query_timer,
848 br_multicast_port_query_expired, (unsigned long)port);
849}
850
851void br_multicast_del_port(struct net_bridge_port *port)
852{
853 del_timer_sync(&port->multicast_router_timer);
854}
855
561f1103
HX
856static void __br_multicast_enable_port(struct net_bridge_port *port)
857{
858 port->multicast_startup_queries_sent = 0;
859
860 if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
861 del_timer(&port->multicast_query_timer))
862 mod_timer(&port->multicast_query_timer, jiffies);
863}
864
eb1d1641
HX
865void br_multicast_enable_port(struct net_bridge_port *port)
866{
867 struct net_bridge *br = port->br;
868
869 spin_lock(&br->multicast_lock);
870 if (br->multicast_disabled || !netif_running(br->dev))
871 goto out;
872
561f1103 873 __br_multicast_enable_port(port);
eb1d1641
HX
874
875out:
876 spin_unlock(&br->multicast_lock);
877}
878
879void br_multicast_disable_port(struct net_bridge_port *port)
880{
881 struct net_bridge *br = port->br;
882 struct net_bridge_port_group *pg;
b67bfe0d 883 struct hlist_node *n;
eb1d1641
HX
884
885 spin_lock(&br->multicast_lock);
b67bfe0d 886 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
eb1d1641
HX
887 br_multicast_del_pg(br, pg);
888
889 if (!hlist_unhashed(&port->rlist))
890 hlist_del_init_rcu(&port->rlist);
891 del_timer(&port->multicast_router_timer);
892 del_timer(&port->multicast_query_timer);
893 spin_unlock(&br->multicast_lock);
894}
895
8ef2a9a5
YH
896static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
897 struct net_bridge_port *port,
898 struct sk_buff *skb)
eb1d1641
HX
899{
900 struct igmpv3_report *ih;
901 struct igmpv3_grec *grec;
902 int i;
903 int len;
904 int num;
905 int type;
906 int err = 0;
907 __be32 group;
b0e9a30d 908 u16 vid = 0;
eb1d1641
HX
909
910 if (!pskb_may_pull(skb, sizeof(*ih)))
911 return -EINVAL;
912
b0e9a30d 913 br_vlan_get_tag(skb, &vid);
eb1d1641
HX
914 ih = igmpv3_report_hdr(skb);
915 num = ntohs(ih->ngrec);
916 len = sizeof(*ih);
917
918 for (i = 0; i < num; i++) {
919 len += sizeof(*grec);
920 if (!pskb_may_pull(skb, len))
921 return -EINVAL;
922
fd218cf9 923 grec = (void *)(skb->data + len - sizeof(*grec));
eb1d1641
HX
924 group = grec->grec_mca;
925 type = grec->grec_type;
926
8eabf95c 927 len += ntohs(grec->grec_nsrcs) * 4;
eb1d1641
HX
928 if (!pskb_may_pull(skb, len))
929 return -EINVAL;
930
931 /* We treat this as an IGMPv2 report for now. */
932 switch (type) {
933 case IGMPV3_MODE_IS_INCLUDE:
934 case IGMPV3_MODE_IS_EXCLUDE:
935 case IGMPV3_CHANGE_TO_INCLUDE:
936 case IGMPV3_CHANGE_TO_EXCLUDE:
937 case IGMPV3_ALLOW_NEW_SOURCES:
938 case IGMPV3_BLOCK_OLD_SOURCES:
939 break;
940
941 default:
942 continue;
943 }
944
b0e9a30d 945 err = br_ip4_multicast_add_group(br, port, group, vid);
eb1d1641
HX
946 if (err)
947 break;
948 }
949
950 return err;
951}
952
dfd56b8b 953#if IS_ENABLED(CONFIG_IPV6)
08b202b6
YH
954static int br_ip6_multicast_mld2_report(struct net_bridge *br,
955 struct net_bridge_port *port,
956 struct sk_buff *skb)
957{
958 struct icmp6hdr *icmp6h;
959 struct mld2_grec *grec;
960 int i;
961 int len;
962 int num;
963 int err = 0;
b0e9a30d 964 u16 vid = 0;
08b202b6
YH
965
966 if (!pskb_may_pull(skb, sizeof(*icmp6h)))
967 return -EINVAL;
968
b0e9a30d 969 br_vlan_get_tag(skb, &vid);
08b202b6
YH
970 icmp6h = icmp6_hdr(skb);
971 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
972 len = sizeof(*icmp6h);
973
974 for (i = 0; i < num; i++) {
975 __be16 *nsrcs, _nsrcs;
976
977 nsrcs = skb_header_pointer(skb,
978 len + offsetof(struct mld2_grec,
649e984d 979 grec_nsrcs),
08b202b6
YH
980 sizeof(_nsrcs), &_nsrcs);
981 if (!nsrcs)
982 return -EINVAL;
983
984 if (!pskb_may_pull(skb,
985 len + sizeof(*grec) +
d41db9f3 986 sizeof(struct in6_addr) * ntohs(*nsrcs)))
08b202b6
YH
987 return -EINVAL;
988
989 grec = (struct mld2_grec *)(skb->data + len);
d41db9f3
LL
990 len += sizeof(*grec) +
991 sizeof(struct in6_addr) * ntohs(*nsrcs);
08b202b6
YH
992
993 /* We treat these as MLDv1 reports for now. */
994 switch (grec->grec_type) {
995 case MLD2_MODE_IS_INCLUDE:
996 case MLD2_MODE_IS_EXCLUDE:
997 case MLD2_CHANGE_TO_INCLUDE:
998 case MLD2_CHANGE_TO_EXCLUDE:
999 case MLD2_ALLOW_NEW_SOURCES:
1000 case MLD2_BLOCK_OLD_SOURCES:
1001 break;
1002
1003 default:
1004 continue;
1005 }
1006
b0e9a30d
VY
1007 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
1008 vid);
08b202b6
YH
 1009 if (err)
1010 break;
1011 }
1012
1013 return err;
1014}
1015#endif
1016
b00589af
LL
1017static void br_multicast_update_querier_timer(struct net_bridge *br,
1018 unsigned long max_delay)
1019{
1020 if (!timer_pending(&br->multicast_querier_timer))
1021 br->multicast_querier_delay_time = jiffies + max_delay;
1022
1023 mod_timer(&br->multicast_querier_timer,
1024 jiffies + br->multicast_querier_interval);
1025}
1026
7e80c124 1027/*
7c77602f 1028 * Add port to router_list
7e80c124 1029 * the list is kept ordered by pointer value
 1030 * and is protected by br->multicast_lock and RCU
1031 */
0909e117
HX
1032static void br_multicast_add_router(struct net_bridge *br,
1033 struct net_bridge_port *port)
1034{
dcdca2c4 1035 struct net_bridge_port *p;
b67bfe0d 1036 struct hlist_node *slot = NULL;
dcdca2c4 1037
b67bfe0d 1038 hlist_for_each_entry(p, &br->router_list, rlist) {
7e80c124 1039 if ((unsigned long) port >= (unsigned long) p)
1040 break;
b67bfe0d 1041 slot = &p->rlist;
dcdca2c4 1042 }
1043
7e80c124 1044 if (slot)
1045 hlist_add_after_rcu(slot, &port->rlist);
dcdca2c4 1046 else
1047 hlist_add_head_rcu(&port->rlist, &br->router_list);
0909e117
HX
1048}
1049
eb1d1641
HX
1050static void br_multicast_mark_router(struct net_bridge *br,
1051 struct net_bridge_port *port)
1052{
1053 unsigned long now = jiffies;
eb1d1641
HX
1054
1055 if (!port) {
1056 if (br->multicast_router == 1)
1057 mod_timer(&br->multicast_router_timer,
1058 now + br->multicast_querier_interval);
1059 return;
1060 }
1061
1062 if (port->multicast_router != 1)
1063 return;
1064
1065 if (!hlist_unhashed(&port->rlist))
1066 goto timer;
1067
0909e117 1068 br_multicast_add_router(br, port);
eb1d1641
HX
1069
1070timer:
1071 mod_timer(&port->multicast_router_timer,
1072 now + br->multicast_querier_interval);
1073}
1074
1075static void br_multicast_query_received(struct net_bridge *br,
1076 struct net_bridge_port *port,
b00589af
LL
1077 int saddr,
1078 unsigned long max_delay)
eb1d1641
HX
1079{
1080 if (saddr)
b00589af 1081 br_multicast_update_querier_timer(br, max_delay);
eb1d1641
HX
1082 else if (timer_pending(&br->multicast_querier_timer))
1083 return;
1084
1085 br_multicast_mark_router(br, port);
1086}
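In the helper above, a query with a non-zero source address re-arms multicast_querier_timer, and a pending querier timer is also what suppresses this bridge's own queries in br_multicast_send_query(); a query from the unspecified address is ignored while that timer is pending, otherwise br_multicast_mark_router() records the receiving port (or the bridge itself when port is NULL) as leading to a multicast router.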
1087
8ef2a9a5
YH
1088static int br_ip4_multicast_query(struct net_bridge *br,
1089 struct net_bridge_port *port,
1090 struct sk_buff *skb)
eb1d1641 1091{
b71d1d42 1092 const struct iphdr *iph = ip_hdr(skb);
eb1d1641
HX
1093 struct igmphdr *ih = igmp_hdr(skb);
1094 struct net_bridge_mdb_entry *mp;
1095 struct igmpv3_query *ih3;
1096 struct net_bridge_port_group *p;
e8051688 1097 struct net_bridge_port_group __rcu **pp;
eb1d1641
HX
1098 unsigned long max_delay;
1099 unsigned long now = jiffies;
1100 __be32 group;
bec68ff1 1101 int err = 0;
b0e9a30d 1102 u16 vid = 0;
eb1d1641
HX
1103
1104 spin_lock(&br->multicast_lock);
1105 if (!netif_running(br->dev) ||
1106 (port && port->state == BR_STATE_DISABLED))
1107 goto out;
1108
eb1d1641
HX
1109 group = ih->group;
1110
1111 if (skb->len == sizeof(*ih)) {
1112 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
1113
1114 if (!max_delay) {
1115 max_delay = 10 * HZ;
1116 group = 0;
1117 }
1118 } else {
bec68ff1
YH
1119 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
1120 err = -EINVAL;
1121 goto out;
1122 }
eb1d1641
HX
1123
1124 ih3 = igmpv3_query_hdr(skb);
1125 if (ih3->nsrcs)
bec68ff1 1126 goto out;
eb1d1641 1127
0ba8c9ec
YH
1128 max_delay = ih3->code ?
1129 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
eb1d1641
HX
1130 }
1131
b00589af
LL
1132 br_multicast_query_received(br, port, !!iph->saddr, max_delay);
1133
eb1d1641
HX
1134 if (!group)
1135 goto out;
1136
b0e9a30d
VY
1137 br_vlan_get_tag(skb, &vid);
1138 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
eb1d1641
HX
1139 if (!mp)
1140 goto out;
1141
9f00b2e7
CW
1142 mod_timer(&mp->timer, now + br->multicast_membership_interval);
1143 mp->timer_armed = true;
1144
eb1d1641
HX
1145 max_delay *= br->multicast_last_member_count;
1146
8a870178 1147 if (mp->mglist &&
eb1d1641
HX
1148 (timer_pending(&mp->timer) ?
1149 time_after(mp->timer.expires, now + max_delay) :
1150 try_to_del_timer_sync(&mp->timer) >= 0))
1151 mod_timer(&mp->timer, now + max_delay);
1152
e8051688
ED
1153 for (pp = &mp->ports;
1154 (p = mlock_dereference(*pp, br)) != NULL;
1155 pp = &p->next) {
eb1d1641
HX
1156 if (timer_pending(&p->timer) ?
1157 time_after(p->timer.expires, now + max_delay) :
1158 try_to_del_timer_sync(&p->timer) >= 0)
24f9cdcb 1159 mod_timer(&p->timer, now + max_delay);
eb1d1641
HX
1160 }
1161
1162out:
1163 spin_unlock(&br->multicast_lock);
bec68ff1 1164 return err;
eb1d1641
HX
1165}
1166
dfd56b8b 1167#if IS_ENABLED(CONFIG_IPV6)
08b202b6
YH
1168static int br_ip6_multicast_query(struct net_bridge *br,
1169 struct net_bridge_port *port,
1170 struct sk_buff *skb)
1171{
b71d1d42 1172 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
eca2a43b 1173 struct mld_msg *mld;
08b202b6
YH
1174 struct net_bridge_mdb_entry *mp;
1175 struct mld2_query *mld2q;
e8051688
ED
1176 struct net_bridge_port_group *p;
1177 struct net_bridge_port_group __rcu **pp;
08b202b6
YH
1178 unsigned long max_delay;
1179 unsigned long now = jiffies;
b71d1d42 1180 const struct in6_addr *group = NULL;
08b202b6 1181 int err = 0;
b0e9a30d 1182 u16 vid = 0;
08b202b6
YH
1183
1184 spin_lock(&br->multicast_lock);
1185 if (!netif_running(br->dev) ||
1186 (port && port->state == BR_STATE_DISABLED))
1187 goto out;
1188
08b202b6
YH
1189 if (skb->len == sizeof(*mld)) {
1190 if (!pskb_may_pull(skb, sizeof(*mld))) {
1191 err = -EINVAL;
1192 goto out;
1193 }
1194 mld = (struct mld_msg *) icmp6_hdr(skb);
4715213d 1195 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
08b202b6
YH
1196 if (max_delay)
1197 group = &mld->mld_mca;
248ba8ec 1198 } else {
08b202b6
YH
1199 if (!pskb_may_pull(skb, sizeof(*mld2q))) {
1200 err = -EINVAL;
1201 goto out;
1202 }
1203 mld2q = (struct mld2_query *)icmp6_hdr(skb);
1204 if (!mld2q->mld2q_nsrcs)
1205 group = &mld2q->mld2q_mca;
e3f5b170
DB
1206
1207 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
08b202b6
YH
1208 }
1209
b00589af
LL
1210 br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr),
1211 max_delay);
1212
08b202b6
YH
1213 if (!group)
1214 goto out;
1215
b0e9a30d
VY
1216 br_vlan_get_tag(skb, &vid);
1217 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
08b202b6
YH
1218 if (!mp)
1219 goto out;
1220
9f00b2e7
CW
1221 mod_timer(&mp->timer, now + br->multicast_membership_interval);
1222 mp->timer_armed = true;
1223
08b202b6 1224 max_delay *= br->multicast_last_member_count;
8a870178 1225 if (mp->mglist &&
08b202b6
YH
1226 (timer_pending(&mp->timer) ?
1227 time_after(mp->timer.expires, now + max_delay) :
1228 try_to_del_timer_sync(&mp->timer) >= 0))
1229 mod_timer(&mp->timer, now + max_delay);
1230
e8051688
ED
1231 for (pp = &mp->ports;
1232 (p = mlock_dereference(*pp, br)) != NULL;
1233 pp = &p->next) {
08b202b6
YH
1234 if (timer_pending(&p->timer) ?
1235 time_after(p->timer.expires, now + max_delay) :
1236 try_to_del_timer_sync(&p->timer) >= 0)
24f9cdcb 1237 mod_timer(&p->timer, now + max_delay);
08b202b6
YH
1238 }
1239
1240out:
1241 spin_unlock(&br->multicast_lock);
1242 return err;
1243}
1244#endif
1245
eb1d1641
HX
1246static void br_multicast_leave_group(struct net_bridge *br,
1247 struct net_bridge_port *port,
8ef2a9a5 1248 struct br_ip *group)
eb1d1641
HX
1249{
1250 struct net_bridge_mdb_htable *mdb;
1251 struct net_bridge_mdb_entry *mp;
1252 struct net_bridge_port_group *p;
1253 unsigned long now;
1254 unsigned long time;
1255
eb1d1641
HX
1256 spin_lock(&br->multicast_lock);
1257 if (!netif_running(br->dev) ||
1258 (port && port->state == BR_STATE_DISABLED) ||
1259 timer_pending(&br->multicast_querier_timer))
1260 goto out;
1261
e8051688 1262 mdb = mlock_dereference(br->mdb, br);
eb1d1641
HX
1263 mp = br_mdb_ip_get(mdb, group);
1264 if (!mp)
1265 goto out;
1266
6b7df111
CW
1267 if (br->multicast_querier &&
1268 !timer_pending(&br->multicast_querier_timer)) {
1269 __br_multicast_send_query(br, port, &mp->addr);
1270
1271 time = jiffies + br->multicast_last_member_count *
1272 br->multicast_last_member_interval;
1273 mod_timer(port ? &port->multicast_query_timer :
1274 &br->multicast_query_timer, time);
1275
1276 for (p = mlock_dereference(mp->ports, br);
1277 p != NULL;
1278 p = mlock_dereference(p->next, br)) {
1279 if (p->port != port)
1280 continue;
1281
1282 if (!hlist_unhashed(&p->mglist) &&
1283 (timer_pending(&p->timer) ?
1284 time_after(p->timer.expires, time) :
1285 try_to_del_timer_sync(&p->timer) >= 0)) {
1286 mod_timer(&p->timer, time);
1287 }
1288
1289 break;
1290 }
1291 }
1292
c2d3babf 1293 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
50426b59
AW
1294 struct net_bridge_port_group __rcu **pp;
1295
1296 for (pp = &mp->ports;
1297 (p = mlock_dereference(*pp, br)) != NULL;
1298 pp = &p->next) {
1299 if (p->port != port)
1300 continue;
1301
1302 rcu_assign_pointer(*pp, p->next);
1303 hlist_del_init(&p->mglist);
1304 del_timer(&p->timer);
1305 call_rcu_bh(&p->rcu, br_multicast_free_pg);
37a393bc 1306 br_mdb_notify(br->dev, port, group, RTM_DELMDB);
50426b59 1307
9f00b2e7 1308 if (!mp->ports && !mp->mglist && mp->timer_armed &&
50426b59
AW
1309 netif_running(br->dev))
1310 mod_timer(&mp->timer, jiffies);
1311 }
1312 goto out;
1313 }
1314
eb1d1641
HX
1315 now = jiffies;
1316 time = now + br->multicast_last_member_count *
1317 br->multicast_last_member_interval;
1318
1319 if (!port) {
9f00b2e7 1320 if (mp->mglist && mp->timer_armed &&
eb1d1641
HX
1321 (timer_pending(&mp->timer) ?
1322 time_after(mp->timer.expires, time) :
1323 try_to_del_timer_sync(&mp->timer) >= 0)) {
1324 mod_timer(&mp->timer, time);
eb1d1641 1325 }
eb1d1641
HX
1326 }
1327
1328out:
1329 spin_unlock(&br->multicast_lock);
1330}
1331
8ef2a9a5
YH
1332static void br_ip4_multicast_leave_group(struct net_bridge *br,
1333 struct net_bridge_port *port,
b0e9a30d
VY
1334 __be32 group,
1335 __u16 vid)
8ef2a9a5
YH
1336{
1337 struct br_ip br_group;
1338
1339 if (ipv4_is_local_multicast(group))
1340 return;
1341
1342 br_group.u.ip4 = group;
1343 br_group.proto = htons(ETH_P_IP);
b0e9a30d 1344 br_group.vid = vid;
8ef2a9a5
YH
1345
1346 br_multicast_leave_group(br, port, &br_group);
1347}
1348
dfd56b8b 1349#if IS_ENABLED(CONFIG_IPV6)
08b202b6
YH
1350static void br_ip6_multicast_leave_group(struct net_bridge *br,
1351 struct net_bridge_port *port,
b0e9a30d
VY
1352 const struct in6_addr *group,
1353 __u16 vid)
08b202b6
YH
1354{
1355 struct br_ip br_group;
1356
e4de9f9e 1357 if (!ipv6_is_transient_multicast(group))
08b202b6
YH
1358 return;
1359
4e3fd7a0 1360 br_group.u.ip6 = *group;
08b202b6 1361 br_group.proto = htons(ETH_P_IPV6);
b0e9a30d 1362 br_group.vid = vid;
08b202b6
YH
1363
1364 br_multicast_leave_group(br, port, &br_group);
1365}
1366#endif
8ef2a9a5 1367
eb1d1641
HX
1368static int br_multicast_ipv4_rcv(struct net_bridge *br,
1369 struct net_bridge_port *port,
1370 struct sk_buff *skb)
1371{
1372 struct sk_buff *skb2 = skb;
b71d1d42 1373 const struct iphdr *iph;
eb1d1641 1374 struct igmphdr *ih;
95c96174
ED
1375 unsigned int len;
1376 unsigned int offset;
eb1d1641 1377 int err;
b0e9a30d 1378 u16 vid = 0;
eb1d1641 1379
eb1d1641
HX
1380 /* We treat OOM as packet loss for now. */
1381 if (!pskb_may_pull(skb, sizeof(*iph)))
1382 return -EINVAL;
1383
1384 iph = ip_hdr(skb);
1385
1386 if (iph->ihl < 5 || iph->version != 4)
1387 return -EINVAL;
1388
1389 if (!pskb_may_pull(skb, ip_hdrlen(skb)))
1390 return -EINVAL;
1391
1392 iph = ip_hdr(skb);
1393
1394 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
1395 return -EINVAL;
1396
bd4265fe 1397 if (iph->protocol != IPPROTO_IGMP) {
bf5e4dd6 1398 if (!ipv4_is_local_multicast(iph->daddr))
bd4265fe 1399 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
eb1d1641 1400 return 0;
bd4265fe 1401 }
eb1d1641
HX
1402
1403 len = ntohs(iph->tot_len);
1404 if (skb->len < len || len < ip_hdrlen(skb))
1405 return -EINVAL;
1406
1407 if (skb->len > len) {
1408 skb2 = skb_clone(skb, GFP_ATOMIC);
1409 if (!skb2)
1410 return -ENOMEM;
1411
1412 err = pskb_trim_rcsum(skb2, len);
1413 if (err)
8440853b 1414 goto err_out;
eb1d1641
HX
1415 }
1416
1417 len -= ip_hdrlen(skb2);
1418 offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
1419 __skb_pull(skb2, offset);
1420 skb_reset_transport_header(skb2);
1421
1422 err = -EINVAL;
1423 if (!pskb_may_pull(skb2, sizeof(*ih)))
1424 goto out;
1425
eb1d1641
HX
1426 switch (skb2->ip_summed) {
1427 case CHECKSUM_COMPLETE:
1428 if (!csum_fold(skb2->csum))
1429 break;
1430 /* fall through */
1431 case CHECKSUM_NONE:
1432 skb2->csum = 0;
1433 if (skb_checksum_complete(skb2))
8440853b 1434 goto out;
eb1d1641
HX
1435 }
1436
1437 err = 0;
1438
b0e9a30d 1439 br_vlan_get_tag(skb2, &vid);
eb1d1641
HX
1440 BR_INPUT_SKB_CB(skb)->igmp = 1;
1441 ih = igmp_hdr(skb2);
1442
1443 switch (ih->type) {
1444 case IGMP_HOST_MEMBERSHIP_REPORT:
1445 case IGMPV2_HOST_MEMBERSHIP_REPORT:
62b2bcb4 1446 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
b0e9a30d 1447 err = br_ip4_multicast_add_group(br, port, ih->group, vid);
eb1d1641
HX
1448 break;
1449 case IGMPV3_HOST_MEMBERSHIP_REPORT:
8ef2a9a5 1450 err = br_ip4_multicast_igmp3_report(br, port, skb2);
eb1d1641
HX
1451 break;
1452 case IGMP_HOST_MEMBERSHIP_QUERY:
8ef2a9a5 1453 err = br_ip4_multicast_query(br, port, skb2);
eb1d1641
HX
1454 break;
1455 case IGMP_HOST_LEAVE_MESSAGE:
b0e9a30d 1456 br_ip4_multicast_leave_group(br, port, ih->group, vid);
eb1d1641
HX
1457 break;
1458 }
1459
1460out:
1461 __skb_push(skb2, offset);
8440853b 1462err_out:
eb1d1641
HX
1463 if (skb2 != skb)
1464 kfree_skb(skb2);
1465 return err;
1466}
1467
dfd56b8b 1468#if IS_ENABLED(CONFIG_IPV6)
08b202b6
YH
1469static int br_multicast_ipv6_rcv(struct net_bridge *br,
1470 struct net_bridge_port *port,
1471 struct sk_buff *skb)
1472{
9d89081d 1473 struct sk_buff *skb2;
b71d1d42 1474 const struct ipv6hdr *ip6h;
22df1331 1475 u8 icmp6_type;
08b202b6 1476 u8 nexthdr;
75f2811c 1477 __be16 frag_off;
95c96174 1478 unsigned int len;
bb7a0bd6 1479 int offset;
08b202b6 1480 int err;
b0e9a30d 1481 u16 vid = 0;
08b202b6 1482
08b202b6
YH
1483 if (!pskb_may_pull(skb, sizeof(*ip6h)))
1484 return -EINVAL;
1485
1486 ip6h = ipv6_hdr(skb);
1487
1488 /*
1489 * We're interested in MLD messages only.
1490 * - Version is 6
 1491 * - MLD always carries the Router Alert hop-by-hop option
 1492 * - But we do not support jumbograms.
1493 */
8fad9c39
LL
1494 if (ip6h->version != 6)
1495 return 0;
1496
1497 /* Prevent flooding this packet if there is no listener present */
1498 if (ipv6_is_transient_multicast(&ip6h->daddr))
1499 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1500
1501 if (ip6h->nexthdr != IPPROTO_HOPOPTS ||
08b202b6
YH
1502 ip6h->payload_len == 0)
1503 return 0;
1504
ff9a57a6 1505 len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
08b202b6
YH
1506 if (skb->len < len)
1507 return -EINVAL;
1508
1509 nexthdr = ip6h->nexthdr;
75f2811c 1510 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off);
08b202b6
YH
1511
1512 if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
1513 return 0;
1514
1515 /* Okay, we found ICMPv6 header */
1516 skb2 = skb_clone(skb, GFP_ATOMIC);
1517 if (!skb2)
1518 return -ENOMEM;
1519
9d89081d
TW
1520 err = -EINVAL;
1521 if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr)))
1522 goto out;
1523
08b202b6
YH
1524 len -= offset - skb_network_offset(skb2);
1525
1526 __skb_pull(skb2, offset);
1527 skb_reset_transport_header(skb2);
fa2da8cd 1528 skb_postpull_rcsum(skb2, skb_network_header(skb2),
1529 skb_network_header_len(skb2));
08b202b6 1530
22df1331 1531 icmp6_type = icmp6_hdr(skb2)->icmp6_type;
08b202b6 1532
22df1331 1533 switch (icmp6_type) {
08b202b6
YH
1534 case ICMPV6_MGM_QUERY:
1535 case ICMPV6_MGM_REPORT:
1536 case ICMPV6_MGM_REDUCTION:
1537 case ICMPV6_MLD2_REPORT:
1538 break;
1539 default:
1540 err = 0;
1541 goto out;
1542 }
1543
1544 /* Okay, we found MLD message. Check further. */
1545 if (skb2->len > len) {
1546 err = pskb_trim_rcsum(skb2, len);
1547 if (err)
1548 goto out;
4b275d7e 1549 err = -EINVAL;
08b202b6
YH
1550 }
1551
4b275d7e
YZ
1552 ip6h = ipv6_hdr(skb2);
1553
08b202b6
YH
1554 switch (skb2->ip_summed) {
1555 case CHECKSUM_COMPLETE:
4b275d7e
YZ
1556 if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
1557 IPPROTO_ICMPV6, skb2->csum))
08b202b6
YH
1558 break;
1559 /*FALLTHROUGH*/
1560 case CHECKSUM_NONE:
4b275d7e
YZ
1561 skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
1562 &ip6h->daddr,
1563 skb2->len,
1564 IPPROTO_ICMPV6, 0));
1565 if (__skb_checksum_complete(skb2))
08b202b6
YH
1566 goto out;
1567 }
1568
1569 err = 0;
1570
b0e9a30d 1571 br_vlan_get_tag(skb, &vid);
08b202b6
YH
1572 BR_INPUT_SKB_CB(skb)->igmp = 1;
1573
22df1331 1574 switch (icmp6_type) {
08b202b6
YH
1575 case ICMPV6_MGM_REPORT:
1576 {
9d89081d
TW
1577 struct mld_msg *mld;
1578 if (!pskb_may_pull(skb2, sizeof(*mld))) {
1579 err = -EINVAL;
1580 goto out;
1581 }
1582 mld = (struct mld_msg *)skb_transport_header(skb2);
fc2af6c7 1583 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
b0e9a30d 1584 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
08b202b6
YH
1585 break;
1586 }
1587 case ICMPV6_MLD2_REPORT:
1588 err = br_ip6_multicast_mld2_report(br, port, skb2);
1589 break;
1590 case ICMPV6_MGM_QUERY:
1591 err = br_ip6_multicast_query(br, port, skb2);
1592 break;
1593 case ICMPV6_MGM_REDUCTION:
1594 {
9d89081d
TW
1595 struct mld_msg *mld;
1596 if (!pskb_may_pull(skb2, sizeof(*mld))) {
1597 err = -EINVAL;
1598 goto out;
1599 }
1600 mld = (struct mld_msg *)skb_transport_header(skb2);
b0e9a30d 1601 br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
08b202b6
YH
1602 }
1603 }
1604
1605out:
9d89081d 1606 kfree_skb(skb2);
08b202b6
YH
1607 return err;
1608}
1609#endif
1610
eb1d1641
HX
1611int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1612 struct sk_buff *skb)
1613{
1fafc7a9
YH
1614 BR_INPUT_SKB_CB(skb)->igmp = 0;
1615 BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
1616
eb1d1641
HX
1617 if (br->multicast_disabled)
1618 return 0;
1619
1620 switch (skb->protocol) {
1621 case htons(ETH_P_IP):
1622 return br_multicast_ipv4_rcv(br, port, skb);
dfd56b8b 1623#if IS_ENABLED(CONFIG_IPV6)
08b202b6
YH
1624 case htons(ETH_P_IPV6):
1625 return br_multicast_ipv6_rcv(br, port, skb);
1626#endif
eb1d1641
HX
1627 }
1628
1629 return 0;
1630}
1631
1632static void br_multicast_query_expired(unsigned long data)
1633{
1634 struct net_bridge *br = (void *)data;
1635
1636 spin_lock(&br->multicast_lock);
1637 if (br->multicast_startup_queries_sent <
1638 br->multicast_startup_query_count)
1639 br->multicast_startup_queries_sent++;
1640
1641 br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);
1642
1643 spin_unlock(&br->multicast_lock);
1644}
1645
1646void br_multicast_init(struct net_bridge *br)
1647{
1648 br->hash_elasticity = 4;
1649 br->hash_max = 512;
1650
1651 br->multicast_router = 1;
c5c23260 1652 br->multicast_querier = 0;
1c8ad5bf 1653 br->multicast_query_use_ifaddr = 0;
eb1d1641
HX
1654 br->multicast_last_member_count = 2;
1655 br->multicast_startup_query_count = 2;
1656
1657 br->multicast_last_member_interval = HZ;
1658 br->multicast_query_response_interval = 10 * HZ;
1659 br->multicast_startup_query_interval = 125 * HZ / 4;
1660 br->multicast_query_interval = 125 * HZ;
1661 br->multicast_querier_interval = 255 * HZ;
1662 br->multicast_membership_interval = 260 * HZ;
1663
b00589af
LL
1664 br->multicast_querier_delay_time = 0;
1665
eb1d1641
HX
1666 spin_lock_init(&br->multicast_lock);
1667 setup_timer(&br->multicast_router_timer,
1668 br_multicast_local_router_expired, 0);
1669 setup_timer(&br->multicast_querier_timer,
bb63f1f8 1670 br_multicast_querier_expired, (unsigned long)br);
eb1d1641
HX
1671 setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
1672 (unsigned long)br);
1673}
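The defaults above appear to follow the usual IGMP/MLD timing relationships (robustness variable 2, matching multicast_last_member_count): membership_interval = 2 * query_interval + query_response_interval = 2 * 125 s + 10 s = 260 s, and querier_interval, the Other Querier Present Interval, = 2 * 125 s + 10 s / 2 = 255 s, with the startup query interval set to one quarter of the query interval.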
1674
1675void br_multicast_open(struct net_bridge *br)
1676{
1677 br->multicast_startup_queries_sent = 0;
1678
1679 if (br->multicast_disabled)
1680 return;
1681
1682 mod_timer(&br->multicast_query_timer, jiffies);
1683}
1684
1685void br_multicast_stop(struct net_bridge *br)
1686{
1687 struct net_bridge_mdb_htable *mdb;
1688 struct net_bridge_mdb_entry *mp;
b67bfe0d 1689 struct hlist_node *n;
eb1d1641
HX
1690 u32 ver;
1691 int i;
1692
1693 del_timer_sync(&br->multicast_router_timer);
1694 del_timer_sync(&br->multicast_querier_timer);
1695 del_timer_sync(&br->multicast_query_timer);
1696
1697 spin_lock_bh(&br->multicast_lock);
e8051688 1698 mdb = mlock_dereference(br->mdb, br);
eb1d1641
HX
1699 if (!mdb)
1700 goto out;
1701
1702 br->mdb = NULL;
1703
1704 ver = mdb->ver;
1705 for (i = 0; i < mdb->max; i++) {
b67bfe0d 1706 hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
eb1d1641
HX
1707 hlist[ver]) {
1708 del_timer(&mp->timer);
9f00b2e7 1709 mp->timer_armed = false;
eb1d1641
HX
1710 call_rcu_bh(&mp->rcu, br_multicast_free_group);
1711 }
1712 }
1713
1714 if (mdb->old) {
1715 spin_unlock_bh(&br->multicast_lock);
10cc2b50 1716 rcu_barrier_bh();
eb1d1641
HX
1717 spin_lock_bh(&br->multicast_lock);
1718 WARN_ON(mdb->old);
1719 }
1720
1721 mdb->old = mdb;
1722 call_rcu_bh(&mdb->rcu, br_mdb_free);
1723
1724out:
1725 spin_unlock_bh(&br->multicast_lock);
1726}
0909e117
HX
1727
1728int br_multicast_set_router(struct net_bridge *br, unsigned long val)
1729{
1730 int err = -ENOENT;
1731
1732 spin_lock_bh(&br->multicast_lock);
1733 if (!netif_running(br->dev))
1734 goto unlock;
1735
1736 switch (val) {
1737 case 0:
1738 case 2:
1739 del_timer(&br->multicast_router_timer);
1740 /* fall through */
1741 case 1:
1742 br->multicast_router = val;
1743 err = 0;
1744 break;
1745
1746 default:
1747 err = -EINVAL;
1748 break;
1749 }
1750
1751unlock:
1752 spin_unlock_bh(&br->multicast_lock);
1753
1754 return err;
1755}
1756
1757int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
1758{
1759 struct net_bridge *br = p->br;
1760 int err = -ENOENT;
1761
1762 spin_lock(&br->multicast_lock);
1763 if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
1764 goto unlock;
1765
1766 switch (val) {
1767 case 0:
1768 case 1:
1769 case 2:
1770 p->multicast_router = val;
1771 err = 0;
1772
1773 if (val < 2 && !hlist_unhashed(&p->rlist))
1774 hlist_del_init_rcu(&p->rlist);
1775
1776 if (val == 1)
1777 break;
1778
1779 del_timer(&p->multicast_router_timer);
1780
1781 if (val == 0)
1782 break;
1783
1784 br_multicast_add_router(br, p);
1785 break;
1786
1787 default:
1788 err = -EINVAL;
1789 break;
1790 }
1791
1792unlock:
1793 spin_unlock(&br->multicast_lock);
1794
1795 return err;
1796}
561f1103 1797
74857216 1798static void br_multicast_start_querier(struct net_bridge *br)
561f1103
HX
1799{
1800 struct net_bridge_port *port;
74857216
HX
1801
1802 br_multicast_open(br);
1803
1804 list_for_each_entry(port, &br->port_list, list) {
1805 if (port->state == BR_STATE_DISABLED ||
1806 port->state == BR_STATE_BLOCKING)
1807 continue;
1808
1809 __br_multicast_enable_port(port);
1810 }
1811}
1812
1813int br_multicast_toggle(struct net_bridge *br, unsigned long val)
1814{
3a7fda06 1815 int err = 0;
e8051688 1816 struct net_bridge_mdb_htable *mdb;
561f1103 1817
ef5e0d82 1818 spin_lock_bh(&br->multicast_lock);
561f1103
HX
1819 if (br->multicast_disabled == !val)
1820 goto unlock;
1821
1822 br->multicast_disabled = !val;
1823 if (br->multicast_disabled)
1824 goto unlock;
1825
3a7fda06
HX
1826 if (!netif_running(br->dev))
1827 goto unlock;
1828
e8051688
ED
1829 mdb = mlock_dereference(br->mdb, br);
1830 if (mdb) {
1831 if (mdb->old) {
561f1103
HX
1832 err = -EEXIST;
1833rollback:
1834 br->multicast_disabled = !!val;
1835 goto unlock;
1836 }
1837
e8051688 1838 err = br_mdb_rehash(&br->mdb, mdb->max,
561f1103
HX
1839 br->hash_elasticity);
1840 if (err)
1841 goto rollback;
1842 }
1843
74857216 1844 br_multicast_start_querier(br);
561f1103
HX
1845
1846unlock:
ef5e0d82 1847 spin_unlock_bh(&br->multicast_lock);
561f1103
HX
1848
1849 return err;
1850}
b195167f 1851
c5c23260
HX
1852int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
1853{
b00589af
LL
1854 unsigned long max_delay;
1855
c5c23260
HX
1856 val = !!val;
1857
1858 spin_lock_bh(&br->multicast_lock);
1859 if (br->multicast_querier == val)
1860 goto unlock;
1861
1862 br->multicast_querier = val;
b00589af
LL
1863 if (!val)
1864 goto unlock;
1865
1866 max_delay = br->multicast_query_response_interval;
1867 if (!timer_pending(&br->multicast_querier_timer))
1868 br->multicast_querier_delay_time = jiffies + max_delay;
1869
1870 br_multicast_start_querier(br);
c5c23260
HX
1871
1872unlock:
1873 spin_unlock_bh(&br->multicast_lock);
1874
1875 return 0;
1876}
1877
b195167f
HX
1878int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1879{
1880 int err = -ENOENT;
1881 u32 old;
e8051688 1882 struct net_bridge_mdb_htable *mdb;
b195167f
HX
1883
1884 spin_lock(&br->multicast_lock);
1885 if (!netif_running(br->dev))
1886 goto unlock;
1887
1888 err = -EINVAL;
1889 if (!is_power_of_2(val))
1890 goto unlock;
e8051688
ED
1891
1892 mdb = mlock_dereference(br->mdb, br);
1893 if (mdb && val < mdb->size)
b195167f
HX
1894 goto unlock;
1895
1896 err = 0;
1897
1898 old = br->hash_max;
1899 br->hash_max = val;
1900
e8051688
ED
1901 if (mdb) {
1902 if (mdb->old) {
b195167f
HX
1903 err = -EEXIST;
1904rollback:
1905 br->hash_max = old;
1906 goto unlock;
1907 }
1908
1909 err = br_mdb_rehash(&br->mdb, br->hash_max,
1910 br->hash_elasticity);
1911 if (err)
1912 goto rollback;
1913 }
1914
1915unlock:
1916 spin_unlock(&br->multicast_lock);
1917
1918 return err;
1919}