net/bridge/br_multicast.c
1 /*
2  * Bridge multicast support.
3  *
4  * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License as published by the Free
8  * Software Foundation; either version 2 of the License, or (at your option)
9  * any later version.
10  *
11  */
12
13 #include <linux/err.h>
14 #include <linux/export.h>
15 #include <linux/if_ether.h>
16 #include <linux/igmp.h>
17 #include <linux/in.h>
18 #include <linux/jhash.h>
19 #include <linux/kernel.h>
20 #include <linux/log2.h>
21 #include <linux/netdevice.h>
22 #include <linux/netfilter_bridge.h>
23 #include <linux/random.h>
24 #include <linux/rculist.h>
25 #include <linux/skbuff.h>
26 #include <linux/slab.h>
27 #include <linux/timer.h>
28 #include <linux/inetdevice.h>
29 #include <linux/mroute.h>
30 #include <net/ip.h>
31 #include <net/switchdev.h>
32 #if IS_ENABLED(CONFIG_IPV6)
33 #include <linux/icmpv6.h>
34 #include <net/ipv6.h>
35 #include <net/mld.h>
36 #include <net/ip6_checksum.h>
37 #include <net/addrconf.h>
38 #endif
39
40 #include "br_private.h"
41
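/*
 * Parameters for the per-bridge MDB rhashtable: entries are keyed by
 * struct br_ip (protocol, address and VLAN id) and linked through
 * net_bridge_mdb_entry::rhnode.
 */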
42 static const struct rhashtable_params br_mdb_rht_params = {
43         .head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
44         .key_offset = offsetof(struct net_bridge_mdb_entry, addr),
45         .key_len = sizeof(struct br_ip),
46         .automatic_shrinking = true,
47         .locks_mul = 1,
48 };
49
50 static void br_multicast_start_querier(struct net_bridge *br,
51                                        struct bridge_mcast_own_query *query);
52 static void br_multicast_add_router(struct net_bridge *br,
53                                     struct net_bridge_port *port);
54 static void br_ip4_multicast_leave_group(struct net_bridge *br,
55                                          struct net_bridge_port *port,
56                                          __be32 group,
57                                          __u16 vid,
58                                          const unsigned char *src);
59
60 static void __del_port_router(struct net_bridge_port *p);
61 #if IS_ENABLED(CONFIG_IPV6)
62 static void br_ip6_multicast_leave_group(struct net_bridge *br,
63                                          struct net_bridge_port *port,
64                                          const struct in6_addr *group,
65                                          __u16 vid, const unsigned char *src);
66 #endif
67
68 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
69 {
70         if (a->proto != b->proto)
71                 return 0;
72         if (a->vid != b->vid)
73                 return 0;
74         switch (a->proto) {
75         case htons(ETH_P_IP):
76                 return a->u.ip4 == b->u.ip4;
77 #if IS_ENABLED(CONFIG_IPV6)
78         case htons(ETH_P_IPV6):
79                 return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
80 #endif
81         }
82         return 0;
83 }
84
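/*
 * MDB lookup helpers: the _rcu variant expects the caller to be inside an
 * RCU read-side section, while br_mdb_ip_get() asserts that the bridge's
 * multicast_lock is held and takes the RCU read lock itself.
 */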
85 static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
86                                                       struct br_ip *dst)
87 {
88         return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
89 }
90
91 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
92                                            struct br_ip *dst)
93 {
94         struct net_bridge_mdb_entry *ent;
95
96         lockdep_assert_held_once(&br->multicast_lock);
97
98         rcu_read_lock();
99         ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
100         rcu_read_unlock();
101
102         return ent;
103 }
104
105 static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
106                                                    __be32 dst, __u16 vid)
107 {
108         struct br_ip br_dst;
109
110         memset(&br_dst, 0, sizeof(br_dst));
111         br_dst.u.ip4 = dst;
112         br_dst.proto = htons(ETH_P_IP);
113         br_dst.vid = vid;
114
115         return br_mdb_ip_get(br, &br_dst);
116 }
117
118 #if IS_ENABLED(CONFIG_IPV6)
119 static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
120                                                    const struct in6_addr *dst,
121                                                    __u16 vid)
122 {
123         struct br_ip br_dst;
124
125         memset(&br_dst, 0, sizeof(br_dst));
126         br_dst.u.ip6 = *dst;
127         br_dst.proto = htons(ETH_P_IPV6);
128         br_dst.vid = vid;
129
130         return br_mdb_ip_get(br, &br_dst);
131 }
132 #endif
133
134 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
135                                         struct sk_buff *skb, u16 vid)
136 {
137         struct br_ip ip;
138
139         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
140                 return NULL;
141
142         if (BR_INPUT_SKB_CB(skb)->igmp)
143                 return NULL;
144
145         memset(&ip, 0, sizeof(ip));
146         ip.proto = skb->protocol;
147         ip.vid = vid;
148
149         switch (skb->protocol) {
150         case htons(ETH_P_IP):
151                 ip.u.ip4 = ip_hdr(skb)->daddr;
152                 break;
153 #if IS_ENABLED(CONFIG_IPV6)
154         case htons(ETH_P_IPV6):
155                 ip.u.ip6 = ipv6_hdr(skb)->daddr;
156                 break;
157 #endif
158         default:
159                 return NULL;
160         }
161
162         return br_mdb_ip_get_rcu(br, &ip);
163 }
164
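/*
 * Group expiry timer: clear the host-joined state and, once no port groups
 * remain, remove the MDB entry from the hash table and free it after an
 * RCU grace period.
 */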
165 static void br_multicast_group_expired(struct timer_list *t)
166 {
167         struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
168         struct net_bridge *br = mp->br;
169
170         spin_lock(&br->multicast_lock);
171         if (!netif_running(br->dev) || timer_pending(&mp->timer))
172                 goto out;
173
174         mp->host_joined = false;
175         br_mdb_notify(br->dev, NULL, &mp->addr, RTM_DELMDB, 0);
176
177         if (mp->ports)
178                 goto out;
179
180         rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
181                                br_mdb_rht_params);
182         hlist_del_rcu(&mp->mdb_node);
183
184         kfree_rcu(mp, rcu);
185
186 out:
187         spin_unlock(&br->multicast_lock);
188 }
189
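/*
 * Unlink a port group from its MDB entry, stop its timer and notify
 * userspace. If the entry is left with no ports and no host join, its
 * group timer is armed so the entry itself gets reclaimed.
 */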
190 static void br_multicast_del_pg(struct net_bridge *br,
191                                 struct net_bridge_port_group *pg)
192 {
193         struct net_bridge_mdb_entry *mp;
194         struct net_bridge_port_group *p;
195         struct net_bridge_port_group __rcu **pp;
196
197         mp = br_mdb_ip_get(br, &pg->addr);
198         if (WARN_ON(!mp))
199                 return;
200
201         for (pp = &mp->ports;
202              (p = mlock_dereference(*pp, br)) != NULL;
203              pp = &p->next) {
204                 if (p != pg)
205                         continue;
206
207                 rcu_assign_pointer(*pp, p->next);
208                 hlist_del_init(&p->mglist);
209                 del_timer(&p->timer);
210                 br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
211                               p->flags);
212                 kfree_rcu(p, rcu);
213
214                 if (!mp->ports && !mp->host_joined &&
215                     netif_running(br->dev))
216                         mod_timer(&mp->timer, jiffies);
217
218                 return;
219         }
220
221         WARN_ON(1);
222 }
223
224 static void br_multicast_port_group_expired(struct timer_list *t)
225 {
226         struct net_bridge_port_group *pg = from_timer(pg, t, timer);
227         struct net_bridge *br = pg->port->br;
228
229         spin_lock(&br->multicast_lock);
230         if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
231             hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
232                 goto out;
233
234         br_multicast_del_pg(br, pg);
235
236 out:
237         spin_unlock(&br->multicast_lock);
238 }
239
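/*
 * Build an IGMP query addressed to 224.0.0.1, carrying the Router Alert
 * IP option; an IGMPv2 or IGMPv3 header is emitted depending on the
 * bridge's configured multicast_igmp_version.
 */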
240 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
241                                                     __be32 group,
242                                                     u8 *igmp_type)
243 {
244         struct igmpv3_query *ihv3;
245         size_t igmp_hdr_size;
246         struct sk_buff *skb;
247         struct igmphdr *ih;
248         struct ethhdr *eth;
249         struct iphdr *iph;
250
251         igmp_hdr_size = sizeof(*ih);
252         if (br->multicast_igmp_version == 3)
253                 igmp_hdr_size = sizeof(*ihv3);
254         skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
255                                                  igmp_hdr_size + 4);
256         if (!skb)
257                 goto out;
258
259         skb->protocol = htons(ETH_P_IP);
260
261         skb_reset_mac_header(skb);
262         eth = eth_hdr(skb);
263
264         ether_addr_copy(eth->h_source, br->dev->dev_addr);
265         eth->h_dest[0] = 1;
266         eth->h_dest[1] = 0;
267         eth->h_dest[2] = 0x5e;
268         eth->h_dest[3] = 0;
269         eth->h_dest[4] = 0;
270         eth->h_dest[5] = 1;
271         eth->h_proto = htons(ETH_P_IP);
272         skb_put(skb, sizeof(*eth));
273
274         skb_set_network_header(skb, skb->len);
275         iph = ip_hdr(skb);
276
277         iph->version = 4;
278         iph->ihl = 6;
279         iph->tos = 0xc0;
280         iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4);
281         iph->id = 0;
282         iph->frag_off = htons(IP_DF);
283         iph->ttl = 1;
284         iph->protocol = IPPROTO_IGMP;
285         iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
286                      inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
287         iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
288         ((u8 *)&iph[1])[0] = IPOPT_RA;
289         ((u8 *)&iph[1])[1] = 4;
290         ((u8 *)&iph[1])[2] = 0;
291         ((u8 *)&iph[1])[3] = 0;
292         ip_send_check(iph);
293         skb_put(skb, 24);
294
295         skb_set_transport_header(skb, skb->len);
296         *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
297
298         switch (br->multicast_igmp_version) {
299         case 2:
300                 ih = igmp_hdr(skb);
301                 ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
302                 ih->code = (group ? br->multicast_last_member_interval :
303                                     br->multicast_query_response_interval) /
304                            (HZ / IGMP_TIMER_SCALE);
305                 ih->group = group;
306                 ih->csum = 0;
307                 ih->csum = ip_compute_csum((void *)ih, sizeof(*ih));
308                 break;
309         case 3:
310                 ihv3 = igmpv3_query_hdr(skb);
311                 ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
312                 ihv3->code = (group ? br->multicast_last_member_interval :
313                                       br->multicast_query_response_interval) /
314                              (HZ / IGMP_TIMER_SCALE);
315                 ihv3->group = group;
316                 ihv3->qqic = br->multicast_query_interval / HZ;
317                 ihv3->nsrcs = 0;
318                 ihv3->resv = 0;
319                 ihv3->suppress = 0;
320                 ihv3->qrv = 2;
321                 ihv3->csum = 0;
322                 ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3));
323                 break;
324         }
325
326         skb_put(skb, igmp_hdr_size);
327         __skb_pull(skb, sizeof(*eth));
328
329 out:
330         return skb;
331 }
332
333 #if IS_ENABLED(CONFIG_IPV6)
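/*
 * IPv6 counterpart: build an MLDv1 or MLDv2 query to ff02::1 with a
 * hop-by-hop Router Alert option. If no usable IPv6 source address can be
 * selected, the skb is dropped and BROPT_HAS_IPV6_ADDR is cleared.
 */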
334 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
335                                                     const struct in6_addr *grp,
336                                                     u8 *igmp_type)
337 {
338         struct mld2_query *mld2q;
339         unsigned long interval;
340         struct ipv6hdr *ip6h;
341         struct mld_msg *mldq;
342         size_t mld_hdr_size;
343         struct sk_buff *skb;
344         struct ethhdr *eth;
345         u8 *hopopt;
346
347         mld_hdr_size = sizeof(*mldq);
348         if (br->multicast_mld_version == 2)
349                 mld_hdr_size = sizeof(*mld2q);
350         skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
351                                                  8 + mld_hdr_size);
352         if (!skb)
353                 goto out;
354
355         skb->protocol = htons(ETH_P_IPV6);
356
357         /* Ethernet header */
358         skb_reset_mac_header(skb);
359         eth = eth_hdr(skb);
360
361         ether_addr_copy(eth->h_source, br->dev->dev_addr);
362         eth->h_proto = htons(ETH_P_IPV6);
363         skb_put(skb, sizeof(*eth));
364
365         /* IPv6 header + HbH option */
366         skb_set_network_header(skb, skb->len);
367         ip6h = ipv6_hdr(skb);
368
369         *(__force __be32 *)ip6h = htonl(0x60000000);
370         ip6h->payload_len = htons(8 + mld_hdr_size);
371         ip6h->nexthdr = IPPROTO_HOPOPTS;
372         ip6h->hop_limit = 1;
373         ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
374         if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
375                                &ip6h->saddr)) {
376                 kfree_skb(skb);
377                 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
378                 return NULL;
379         }
380
381         br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
382         ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
383
384         hopopt = (u8 *)(ip6h + 1);
385         hopopt[0] = IPPROTO_ICMPV6;             /* next hdr */
386         hopopt[1] = 0;                          /* length of HbH */
387         hopopt[2] = IPV6_TLV_ROUTERALERT;       /* Router Alert */
388         hopopt[3] = 2;                          /* Length of RA Option */
389         hopopt[4] = 0;                          /* Type = 0x0000 (MLD) */
390         hopopt[5] = 0;
391         hopopt[6] = IPV6_TLV_PAD1;              /* Pad1 */
392         hopopt[7] = IPV6_TLV_PAD1;              /* Pad1 */
393
394         skb_put(skb, sizeof(*ip6h) + 8);
395
396         /* ICMPv6 */
397         skb_set_transport_header(skb, skb->len);
398         interval = ipv6_addr_any(grp) ?
399                         br->multicast_query_response_interval :
400                         br->multicast_last_member_interval;
401         *igmp_type = ICMPV6_MGM_QUERY;
402         switch (br->multicast_mld_version) {
403         case 1:
404                 mldq = (struct mld_msg *)icmp6_hdr(skb);
405                 mldq->mld_type = ICMPV6_MGM_QUERY;
406                 mldq->mld_code = 0;
407                 mldq->mld_cksum = 0;
408                 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
409                 mldq->mld_reserved = 0;
410                 mldq->mld_mca = *grp;
411                 mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
412                                                   sizeof(*mldq), IPPROTO_ICMPV6,
413                                                   csum_partial(mldq,
414                                                                sizeof(*mldq),
415                                                                0));
416                 break;
417         case 2:
418                 mld2q = (struct mld2_query *)icmp6_hdr(skb);
419                 mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
420                 mld2q->mld2q_type = ICMPV6_MGM_QUERY;
421                 mld2q->mld2q_code = 0;
422                 mld2q->mld2q_cksum = 0;
423                 mld2q->mld2q_resv1 = 0;
424                 mld2q->mld2q_resv2 = 0;
425                 mld2q->mld2q_suppress = 0;
426                 mld2q->mld2q_qrv = 2;
427                 mld2q->mld2q_nsrcs = 0;
428                 mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
429                 mld2q->mld2q_mca = *grp;
430                 mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
431                                                      sizeof(*mld2q),
432                                                      IPPROTO_ICMPV6,
433                                                      csum_partial(mld2q,
434                                                                   sizeof(*mld2q),
435                                                                   0));
436                 break;
437         }
438         skb_put(skb, mld_hdr_size);
439
440         __skb_pull(skb, sizeof(*eth));
441
442 out:
443         return skb;
444 }
445 #endif
446
447 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
448                                                 struct br_ip *addr,
449                                                 u8 *igmp_type)
450 {
451         switch (addr->proto) {
452         case htons(ETH_P_IP):
453                 return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
454 #if IS_ENABLED(CONFIG_IPV6)
455         case htons(ETH_P_IPV6):
456                 return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
457                                                     igmp_type);
458 #endif
459         }
460         return NULL;
461 }
462
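/*
 * Find or create the MDB entry for @group. If the hash table already holds
 * hash_max entries, multicast snooping is disabled on the bridge and
 * -E2BIG is returned instead of adding a new entry.
 */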
463 struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
464                                                     struct br_ip *group)
465 {
466         struct net_bridge_mdb_entry *mp;
467         int err;
468
469         mp = br_mdb_ip_get(br, group);
470         if (mp)
471                 return mp;
472
473         if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
474                 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
475                 return ERR_PTR(-E2BIG);
476         }
477
478         mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
479         if (unlikely(!mp))
480                 return ERR_PTR(-ENOMEM);
481
482         mp->br = br;
483         mp->addr = *group;
484         timer_setup(&mp->timer, br_multicast_group_expired, 0);
485         err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
486                                             br_mdb_rht_params);
487         if (err) {
488                 kfree(mp);
489                 mp = ERR_PTR(err);
490         } else {
491                 hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
492         }
493
494         return mp;
495 }
496
497 struct net_bridge_port_group *br_multicast_new_port_group(
498                         struct net_bridge_port *port,
499                         struct br_ip *group,
500                         struct net_bridge_port_group __rcu *next,
501                         unsigned char flags,
502                         const unsigned char *src)
503 {
504         struct net_bridge_port_group *p;
505
506         p = kzalloc(sizeof(*p), GFP_ATOMIC);
507         if (unlikely(!p))
508                 return NULL;
509
510         p->addr = *group;
511         p->port = port;
512         p->flags = flags;
513         rcu_assign_pointer(p->next, next);
514         hlist_add_head(&p->mglist, &port->mglist);
515         timer_setup(&p->timer, br_multicast_port_group_expired, 0);
516
517         if (src)
518                 memcpy(p->eth_addr, src, ETH_ALEN);
519         else
520                 memset(p->eth_addr, 0xff, ETH_ALEN);
521
522         return p;
523 }
524
525 static bool br_port_group_equal(struct net_bridge_port_group *p,
526                                 struct net_bridge_port *port,
527                                 const unsigned char *src)
528 {
529         if (p->port != port)
530                 return false;
531
532         if (!(port->flags & BR_MULTICAST_TO_UNICAST))
533                 return true;
534
535         return ether_addr_equal(src, p->eth_addr);
536 }
537
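/*
 * Record a join: a NULL @port means the bridge device itself joined, so
 * only the host_joined state and group timer are updated; otherwise the
 * matching port group is found or created (the list is kept ordered by
 * port pointer) and its membership timer is refreshed.
 */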
538 static int br_multicast_add_group(struct net_bridge *br,
539                                   struct net_bridge_port *port,
540                                   struct br_ip *group,
541                                   const unsigned char *src)
542 {
543         struct net_bridge_port_group __rcu **pp;
544         struct net_bridge_port_group *p;
545         struct net_bridge_mdb_entry *mp;
546         unsigned long now = jiffies;
547         int err;
548
549         spin_lock(&br->multicast_lock);
550         if (!netif_running(br->dev) ||
551             (port && port->state == BR_STATE_DISABLED))
552                 goto out;
553
554         mp = br_multicast_new_group(br, group);
555         err = PTR_ERR(mp);
556         if (IS_ERR(mp))
557                 goto err;
558
559         if (!port) {
560                 if (!mp->host_joined) {
561                         mp->host_joined = true;
562                         br_mdb_notify(br->dev, NULL, &mp->addr, RTM_NEWMDB, 0);
563                 }
564                 mod_timer(&mp->timer, now + br->multicast_membership_interval);
565                 goto out;
566         }
567
568         for (pp = &mp->ports;
569              (p = mlock_dereference(*pp, br)) != NULL;
570              pp = &p->next) {
571                 if (br_port_group_equal(p, port, src))
572                         goto found;
573                 if ((unsigned long)p->port < (unsigned long)port)
574                         break;
575         }
576
577         p = br_multicast_new_port_group(port, group, *pp, 0, src);
578         if (unlikely(!p))
579                 goto err;
580         rcu_assign_pointer(*pp, p);
581         br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);
582
583 found:
584         mod_timer(&p->timer, now + br->multicast_membership_interval);
585 out:
586         err = 0;
587
588 err:
589         spin_unlock(&br->multicast_lock);
590         return err;
591 }
592
593 static int br_ip4_multicast_add_group(struct net_bridge *br,
594                                       struct net_bridge_port *port,
595                                       __be32 group,
596                                       __u16 vid,
597                                       const unsigned char *src)
598 {
599         struct br_ip br_group;
600
601         if (ipv4_is_local_multicast(group))
602                 return 0;
603
604         br_group.u.ip4 = group;
605         br_group.proto = htons(ETH_P_IP);
606         br_group.vid = vid;
607
608         return br_multicast_add_group(br, port, &br_group, src);
609 }
610
611 #if IS_ENABLED(CONFIG_IPV6)
612 static int br_ip6_multicast_add_group(struct net_bridge *br,
613                                       struct net_bridge_port *port,
614                                       const struct in6_addr *group,
615                                       __u16 vid,
616                                       const unsigned char *src)
617 {
618         struct br_ip br_group;
619
620         if (ipv6_addr_is_ll_all_nodes(group))
621                 return 0;
622
623         memset(&br_group, 0, sizeof(br_group));
624         br_group.u.ip6 = *group;
625         br_group.proto = htons(ETH_P_IPV6);
626         br_group.vid = vid;
627
628         return br_multicast_add_group(br, port, &br_group, src);
629 }
630 #endif
631
632 static void br_multicast_router_expired(struct timer_list *t)
633 {
634         struct net_bridge_port *port =
635                         from_timer(port, t, multicast_router_timer);
636         struct net_bridge *br = port->br;
637
638         spin_lock(&br->multicast_lock);
639         if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
640             port->multicast_router == MDB_RTR_TYPE_PERM ||
641             timer_pending(&port->multicast_router_timer))
642                 goto out;
643
644         __del_port_router(port);
645 out:
646         spin_unlock(&br->multicast_lock);
647 }
648
649 static void br_mc_router_state_change(struct net_bridge *p,
650                                       bool is_mc_router)
651 {
652         struct switchdev_attr attr = {
653                 .orig_dev = p->dev,
654                 .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
655                 .flags = SWITCHDEV_F_DEFER,
656                 .u.mrouter = is_mc_router,
657         };
658
659         switchdev_port_attr_set(p->dev, &attr);
660 }
661
662 static void br_multicast_local_router_expired(struct timer_list *t)
663 {
664         struct net_bridge *br = from_timer(br, t, multicast_router_timer);
665
666         spin_lock(&br->multicast_lock);
667         if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
668             br->multicast_router == MDB_RTR_TYPE_PERM ||
669             timer_pending(&br->multicast_router_timer))
670                 goto out;
671
672         br_mc_router_state_change(br, false);
673 out:
674         spin_unlock(&br->multicast_lock);
675 }
676
677 static void br_multicast_querier_expired(struct net_bridge *br,
678                                          struct bridge_mcast_own_query *query)
679 {
680         spin_lock(&br->multicast_lock);
681         if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
682                 goto out;
683
684         br_multicast_start_querier(br, query);
685
686 out:
687         spin_unlock(&br->multicast_lock);
688 }
689
690 static void br_ip4_multicast_querier_expired(struct timer_list *t)
691 {
692         struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);
693
694         br_multicast_querier_expired(br, &br->ip4_own_query);
695 }
696
697 #if IS_ENABLED(CONFIG_IPV6)
698 static void br_ip6_multicast_querier_expired(struct timer_list *t)
699 {
700         struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);
701
702         br_multicast_querier_expired(br, &br->ip6_own_query);
703 }
704 #endif
705
706 static void br_multicast_select_own_querier(struct net_bridge *br,
707                                             struct br_ip *ip,
708                                             struct sk_buff *skb)
709 {
710         if (ip->proto == htons(ETH_P_IP))
711                 br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
712 #if IS_ENABLED(CONFIG_IPV6)
713         else
714                 br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
715 #endif
716 }
717
718 static void __br_multicast_send_query(struct net_bridge *br,
719                                       struct net_bridge_port *port,
720                                       struct br_ip *ip)
721 {
722         struct sk_buff *skb;
723         u8 igmp_type;
724
725         skb = br_multicast_alloc_query(br, ip, &igmp_type);
726         if (!skb)
727                 return;
728
729         if (port) {
730                 skb->dev = port->dev;
731                 br_multicast_count(br, port, skb, igmp_type,
732                                    BR_MCAST_DIR_TX);
733                 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
734                         dev_net(port->dev), NULL, skb, NULL, skb->dev,
735                         br_dev_queue_push_xmit);
736         } else {
737                 br_multicast_select_own_querier(br, ip, skb);
738                 br_multicast_count(br, port, skb, igmp_type,
739                                    BR_MCAST_DIR_RX);
740                 netif_rx(skb);
741         }
742 }
743
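/*
 * Emit a general query from our own querier, but only while multicast
 * snooping and the querier option are enabled and no other querier is
 * active; afterwards the own-query timer is re-armed with the startup or
 * regular query interval.
 */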
744 static void br_multicast_send_query(struct net_bridge *br,
745                                     struct net_bridge_port *port,
746                                     struct bridge_mcast_own_query *own_query)
747 {
748         struct bridge_mcast_other_query *other_query = NULL;
749         struct br_ip br_group;
750         unsigned long time;
751
752         if (!netif_running(br->dev) ||
753             !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
754             !br_opt_get(br, BROPT_MULTICAST_QUERIER))
755                 return;
756
757         memset(&br_group.u, 0, sizeof(br_group.u));
758
759         if (port ? (own_query == &port->ip4_own_query) :
760                    (own_query == &br->ip4_own_query)) {
761                 other_query = &br->ip4_other_query;
762                 br_group.proto = htons(ETH_P_IP);
763 #if IS_ENABLED(CONFIG_IPV6)
764         } else {
765                 other_query = &br->ip6_other_query;
766                 br_group.proto = htons(ETH_P_IPV6);
767 #endif
768         }
769
770         if (!other_query || timer_pending(&other_query->timer))
771                 return;
772
773         __br_multicast_send_query(br, port, &br_group);
774
775         time = jiffies;
776         time += own_query->startup_sent < br->multicast_startup_query_count ?
777                 br->multicast_startup_query_interval :
778                 br->multicast_query_interval;
779         mod_timer(&own_query->timer, time);
780 }
781
782 static void
783 br_multicast_port_query_expired(struct net_bridge_port *port,
784                                 struct bridge_mcast_own_query *query)
785 {
786         struct net_bridge *br = port->br;
787
788         spin_lock(&br->multicast_lock);
789         if (port->state == BR_STATE_DISABLED ||
790             port->state == BR_STATE_BLOCKING)
791                 goto out;
792
793         if (query->startup_sent < br->multicast_startup_query_count)
794                 query->startup_sent++;
795
796         br_multicast_send_query(port->br, port, query);
797
798 out:
799         spin_unlock(&br->multicast_lock);
800 }
801
802 static void br_ip4_multicast_port_query_expired(struct timer_list *t)
803 {
804         struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);
805
806         br_multicast_port_query_expired(port, &port->ip4_own_query);
807 }
808
809 #if IS_ENABLED(CONFIG_IPV6)
810 static void br_ip6_multicast_port_query_expired(struct timer_list *t)
811 {
812         struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);
813
814         br_multicast_port_query_expired(port, &port->ip6_own_query);
815 }
816 #endif
817
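/*
 * Propagate the bridge's multicast snooping state to switchdev drivers;
 * @value is the "enabled" state, so it is inverted for the MC_DISABLED
 * attribute.
 */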
818 static void br_mc_disabled_update(struct net_device *dev, bool value)
819 {
820         struct switchdev_attr attr = {
821                 .orig_dev = dev,
822                 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
823                 .flags = SWITCHDEV_F_DEFER,
824                 .u.mc_disabled = !value,
825         };
826
827         switchdev_port_attr_set(dev, &attr);
828 }
829
830 int br_multicast_add_port(struct net_bridge_port *port)
831 {
832         port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
833
834         timer_setup(&port->multicast_router_timer,
835                     br_multicast_router_expired, 0);
836         timer_setup(&port->ip4_own_query.timer,
837                     br_ip4_multicast_port_query_expired, 0);
838 #if IS_ENABLED(CONFIG_IPV6)
839         timer_setup(&port->ip6_own_query.timer,
840                     br_ip6_multicast_port_query_expired, 0);
841 #endif
842         br_mc_disabled_update(port->dev,
843                               br_opt_get(port->br, BROPT_MULTICAST_ENABLED));
844
845         port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
846         if (!port->mcast_stats)
847                 return -ENOMEM;
848
849         return 0;
850 }
851
852 void br_multicast_del_port(struct net_bridge_port *port)
853 {
854         struct net_bridge *br = port->br;
855         struct net_bridge_port_group *pg;
856         struct hlist_node *n;
857
858         /* Take care of any remaining groups; only permanent entries should be left */
859         spin_lock_bh(&br->multicast_lock);
860         hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
861                 br_multicast_del_pg(br, pg);
862         spin_unlock_bh(&br->multicast_lock);
863         del_timer_sync(&port->multicast_router_timer);
864         free_percpu(port->mcast_stats);
865 }
866
867 static void br_multicast_enable(struct bridge_mcast_own_query *query)
868 {
869         query->startup_sent = 0;
870
871         if (try_to_del_timer_sync(&query->timer) >= 0 ||
872             del_timer(&query->timer))
873                 mod_timer(&query->timer, jiffies);
874 }
875
876 static void __br_multicast_enable_port(struct net_bridge_port *port)
877 {
878         struct net_bridge *br = port->br;
879
880         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
881                 return;
882
883         br_multicast_enable(&port->ip4_own_query);
884 #if IS_ENABLED(CONFIG_IPV6)
885         br_multicast_enable(&port->ip6_own_query);
886 #endif
887         if (port->multicast_router == MDB_RTR_TYPE_PERM &&
888             hlist_unhashed(&port->rlist))
889                 br_multicast_add_router(br, port);
890 }
891
892 void br_multicast_enable_port(struct net_bridge_port *port)
893 {
894         struct net_bridge *br = port->br;
895
896         spin_lock(&br->multicast_lock);
897         __br_multicast_enable_port(port);
898         spin_unlock(&br->multicast_lock);
899 }
900
901 void br_multicast_disable_port(struct net_bridge_port *port)
902 {
903         struct net_bridge *br = port->br;
904         struct net_bridge_port_group *pg;
905         struct hlist_node *n;
906
907         spin_lock(&br->multicast_lock);
908         hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
909                 if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
910                         br_multicast_del_pg(br, pg);
911
912         __del_port_router(port);
913
914         del_timer(&port->multicast_router_timer);
915         del_timer(&port->ip4_own_query.timer);
916 #if IS_ENABLED(CONFIG_IPV6)
917         del_timer(&port->ip6_own_query.timer);
918 #endif
919         spin_unlock(&br->multicast_lock);
920 }
921
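/*
 * Walk the group records of an IGMPv3 report. Each record is handled like
 * an IGMPv2 join for now, except that IS_INCLUDE/TO_INCLUDE records with
 * zero sources are treated as a leave for the group.
 */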
922 static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
923                                          struct net_bridge_port *port,
924                                          struct sk_buff *skb,
925                                          u16 vid)
926 {
927         const unsigned char *src;
928         struct igmpv3_report *ih;
929         struct igmpv3_grec *grec;
930         int i;
931         int len;
932         int num;
933         int type;
934         int err = 0;
935         __be32 group;
936
937         ih = igmpv3_report_hdr(skb);
938         num = ntohs(ih->ngrec);
939         len = skb_transport_offset(skb) + sizeof(*ih);
940
941         for (i = 0; i < num; i++) {
942                 len += sizeof(*grec);
943                 if (!ip_mc_may_pull(skb, len))
944                         return -EINVAL;
945
946                 grec = (void *)(skb->data + len - sizeof(*grec));
947                 group = grec->grec_mca;
948                 type = grec->grec_type;
949
950                 len += ntohs(grec->grec_nsrcs) * 4;
951                 if (!ip_mc_may_pull(skb, len))
952                         return -EINVAL;
953
954                 /* We treat this as an IGMPv2 report for now. */
955                 switch (type) {
956                 case IGMPV3_MODE_IS_INCLUDE:
957                 case IGMPV3_MODE_IS_EXCLUDE:
958                 case IGMPV3_CHANGE_TO_INCLUDE:
959                 case IGMPV3_CHANGE_TO_EXCLUDE:
960                 case IGMPV3_ALLOW_NEW_SOURCES:
961                 case IGMPV3_BLOCK_OLD_SOURCES:
962                         break;
963
964                 default:
965                         continue;
966                 }
967
968                 src = eth_hdr(skb)->h_source;
969                 if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
970                      type == IGMPV3_MODE_IS_INCLUDE) &&
971                     ntohs(grec->grec_nsrcs) == 0) {
972                         br_ip4_multicast_leave_group(br, port, group, vid, src);
973                 } else {
974                         err = br_ip4_multicast_add_group(br, port, group, vid,
975                                                          src);
976                         if (err)
977                                 break;
978                 }
979         }
980
981         return err;
982 }
983
984 #if IS_ENABLED(CONFIG_IPV6)
985 static int br_ip6_multicast_mld2_report(struct net_bridge *br,
986                                         struct net_bridge_port *port,
987                                         struct sk_buff *skb,
988                                         u16 vid)
989 {
990         unsigned int nsrcs_offset;
991         const unsigned char *src;
992         struct icmp6hdr *icmp6h;
993         struct mld2_grec *grec;
994         unsigned int grec_len;
995         int i;
996         int len;
997         int num;
998         int err = 0;
999
1000         if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
1001                 return -EINVAL;
1002
1003         icmp6h = icmp6_hdr(skb);
1004         num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
1005         len = skb_transport_offset(skb) + sizeof(*icmp6h);
1006
1007         for (i = 0; i < num; i++) {
1008                 __be16 *nsrcs, _nsrcs;
1009
1010                 nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
1011
1012                 if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
1013                     nsrcs_offset + sizeof(_nsrcs))
1014                         return -EINVAL;
1015
1016                 nsrcs = skb_header_pointer(skb, nsrcs_offset,
1017                                            sizeof(_nsrcs), &_nsrcs);
1018                 if (!nsrcs)
1019                         return -EINVAL;
1020
1021                 grec_len = struct_size(grec, grec_src, ntohs(*nsrcs));
1022
1023                 if (!ipv6_mc_may_pull(skb, len + grec_len))
1024                         return -EINVAL;
1025
1026                 grec = (struct mld2_grec *)(skb->data + len);
1027                 len += grec_len;
1028
1029                 /* We treat these as MLDv1 reports for now. */
1030                 switch (grec->grec_type) {
1031                 case MLD2_MODE_IS_INCLUDE:
1032                 case MLD2_MODE_IS_EXCLUDE:
1033                 case MLD2_CHANGE_TO_INCLUDE:
1034                 case MLD2_CHANGE_TO_EXCLUDE:
1035                 case MLD2_ALLOW_NEW_SOURCES:
1036                 case MLD2_BLOCK_OLD_SOURCES:
1037                         break;
1038
1039                 default:
1040                         continue;
1041                 }
1042
1043                 src = eth_hdr(skb)->h_source;
1044                 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
1045                      grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
1046                     ntohs(*nsrcs) == 0) {
1047                         br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
1048                                                      vid, src);
1049                 } else {
1050                         err = br_ip6_multicast_add_group(br, port,
1051                                                          &grec->grec_mca, vid,
1052                                                          src);
1053                         if (err)
1054                                 break;
1055                 }
1056         }
1057
1058         return err;
1059 }
1060 #endif
1061
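/*
 * IGMP querier election: the lowest source address wins. Adopt the sender
 * as the selected querier if no querier state is currently active, no
 * address has been recorded yet, or its address is not higher than the one
 * we already track.
 */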
1062 static bool br_ip4_multicast_select_querier(struct net_bridge *br,
1063                                             struct net_bridge_port *port,
1064                                             __be32 saddr)
1065 {
1066         if (!timer_pending(&br->ip4_own_query.timer) &&
1067             !timer_pending(&br->ip4_other_query.timer))
1068                 goto update;
1069
1070         if (!br->ip4_querier.addr.u.ip4)
1071                 goto update;
1072
1073         if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
1074                 goto update;
1075
1076         return false;
1077
1078 update:
1079         br->ip4_querier.addr.u.ip4 = saddr;
1080
1081         /* update is protected by the caller holding br->multicast_lock */
1082         rcu_assign_pointer(br->ip4_querier.port, port);
1083
1084         return true;
1085 }
1086
1087 #if IS_ENABLED(CONFIG_IPV6)
1088 static bool br_ip6_multicast_select_querier(struct net_bridge *br,
1089                                             struct net_bridge_port *port,
1090                                             struct in6_addr *saddr)
1091 {
1092         if (!timer_pending(&br->ip6_own_query.timer) &&
1093             !timer_pending(&br->ip6_other_query.timer))
1094                 goto update;
1095
1096         if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
1097                 goto update;
1098
1099         return false;
1100
1101 update:
1102         br->ip6_querier.addr.u.ip6 = *saddr;
1103
1104         /* update is protected by the caller holding br->multicast_lock */
1105         rcu_assign_pointer(br->ip6_querier.port, port);
1106
1107         return true;
1108 }
1109 #endif
1110
1111 static bool br_multicast_select_querier(struct net_bridge *br,
1112                                         struct net_bridge_port *port,
1113                                         struct br_ip *saddr)
1114 {
1115         switch (saddr->proto) {
1116         case htons(ETH_P_IP):
1117                 return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
1118 #if IS_ENABLED(CONFIG_IPV6)
1119         case htons(ETH_P_IPV6):
1120                 return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
1121 #endif
1122         }
1123
1124         return false;
1125 }
1126
1127 static void
1128 br_multicast_update_query_timer(struct net_bridge *br,
1129                                 struct bridge_mcast_other_query *query,
1130                                 unsigned long max_delay)
1131 {
1132         if (!timer_pending(&query->timer))
1133                 query->delay_time = jiffies + max_delay;
1134
1135         mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
1136 }
1137
1138 static void br_port_mc_router_state_change(struct net_bridge_port *p,
1139                                            bool is_mc_router)
1140 {
1141         struct switchdev_attr attr = {
1142                 .orig_dev = p->dev,
1143                 .id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
1144                 .flags = SWITCHDEV_F_DEFER,
1145                 .u.mrouter = is_mc_router,
1146         };
1147
1148         switchdev_port_attr_set(p->dev, &attr);
1149 }
1150
1151 /*
1152  * Add port to router_list
1153  *  list is maintained ordered by pointer value
1154  *  and locked by br->multicast_lock and RCU
1155  */
1156 static void br_multicast_add_router(struct net_bridge *br,
1157                                     struct net_bridge_port *port)
1158 {
1159         struct net_bridge_port *p;
1160         struct hlist_node *slot = NULL;
1161
1162         if (!hlist_unhashed(&port->rlist))
1163                 return;
1164
1165         hlist_for_each_entry(p, &br->router_list, rlist) {
1166                 if ((unsigned long) port >= (unsigned long) p)
1167                         break;
1168                 slot = &p->rlist;
1169         }
1170
1171         if (slot)
1172                 hlist_add_behind_rcu(&port->rlist, slot);
1173         else
1174                 hlist_add_head_rcu(&port->rlist, &br->router_list);
1175         br_rtr_notify(br->dev, port, RTM_NEWMDB);
1176         br_port_mc_router_state_change(port, true);
1177 }
1178
1179 static void br_multicast_mark_router(struct net_bridge *br,
1180                                      struct net_bridge_port *port)
1181 {
1182         unsigned long now = jiffies;
1183
1184         if (!port) {
1185                 if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
1186                         if (!timer_pending(&br->multicast_router_timer))
1187                                 br_mc_router_state_change(br, true);
1188                         mod_timer(&br->multicast_router_timer,
1189                                   now + br->multicast_querier_interval);
1190                 }
1191                 return;
1192         }
1193
1194         if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
1195             port->multicast_router == MDB_RTR_TYPE_PERM)
1196                 return;
1197
1198         br_multicast_add_router(br, port);
1199
1200         mod_timer(&port->multicast_router_timer,
1201                   now + br->multicast_querier_interval);
1202 }
1203
1204 static void br_multicast_query_received(struct net_bridge *br,
1205                                         struct net_bridge_port *port,
1206                                         struct bridge_mcast_other_query *query,
1207                                         struct br_ip *saddr,
1208                                         unsigned long max_delay)
1209 {
1210         if (!br_multicast_select_querier(br, port, saddr))
1211                 return;
1212
1213         br_multicast_update_query_timer(br, query, max_delay);
1214         br_multicast_mark_router(br, port);
1215 }
1216
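/*
 * Process an incoming IGMP query. A general query updates the other-querier
 * state and marks the port as a router port; a group-specific query shortens
 * the timers of the matching MDB entry and its port groups to the advertised
 * max response time multiplied by last_member_count.
 */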
1217 static void br_ip4_multicast_query(struct net_bridge *br,
1218                                    struct net_bridge_port *port,
1219                                    struct sk_buff *skb,
1220                                    u16 vid)
1221 {
1222         unsigned int transport_len = ip_transport_len(skb);
1223         const struct iphdr *iph = ip_hdr(skb);
1224         struct igmphdr *ih = igmp_hdr(skb);
1225         struct net_bridge_mdb_entry *mp;
1226         struct igmpv3_query *ih3;
1227         struct net_bridge_port_group *p;
1228         struct net_bridge_port_group __rcu **pp;
1229         struct br_ip saddr;
1230         unsigned long max_delay;
1231         unsigned long now = jiffies;
1232         __be32 group;
1233
1234         spin_lock(&br->multicast_lock);
1235         if (!netif_running(br->dev) ||
1236             (port && port->state == BR_STATE_DISABLED))
1237                 goto out;
1238
1239         group = ih->group;
1240
1241         if (transport_len == sizeof(*ih)) {
1242                 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
1243
1244                 if (!max_delay) {
1245                         max_delay = 10 * HZ;
1246                         group = 0;
1247                 }
1248         } else if (transport_len >= sizeof(*ih3)) {
1249                 ih3 = igmpv3_query_hdr(skb);
1250                 if (ih3->nsrcs)
1251                         goto out;
1252
1253                 max_delay = ih3->code ?
1254                             IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
1255         } else {
1256                 goto out;
1257         }
1258
1259         if (!group) {
1260                 saddr.proto = htons(ETH_P_IP);
1261                 saddr.u.ip4 = iph->saddr;
1262
1263                 br_multicast_query_received(br, port, &br->ip4_other_query,
1264                                             &saddr, max_delay);
1265                 goto out;
1266         }
1267
1268         mp = br_mdb_ip4_get(br, group, vid);
1269         if (!mp)
1270                 goto out;
1271
1272         max_delay *= br->multicast_last_member_count;
1273
1274         if (mp->host_joined &&
1275             (timer_pending(&mp->timer) ?
1276              time_after(mp->timer.expires, now + max_delay) :
1277              try_to_del_timer_sync(&mp->timer) >= 0))
1278                 mod_timer(&mp->timer, now + max_delay);
1279
1280         for (pp = &mp->ports;
1281              (p = mlock_dereference(*pp, br)) != NULL;
1282              pp = &p->next) {
1283                 if (timer_pending(&p->timer) ?
1284                     time_after(p->timer.expires, now + max_delay) :
1285                     try_to_del_timer_sync(&p->timer) >= 0)
1286                         mod_timer(&p->timer, now + max_delay);
1287         }
1288
1289 out:
1290         spin_unlock(&br->multicast_lock);
1291 }
1292
1293 #if IS_ENABLED(CONFIG_IPV6)
1294 static int br_ip6_multicast_query(struct net_bridge *br,
1295                                   struct net_bridge_port *port,
1296                                   struct sk_buff *skb,
1297                                   u16 vid)
1298 {
1299         unsigned int transport_len = ipv6_transport_len(skb);
1300         const struct ipv6hdr *ip6h = ipv6_hdr(skb);
1301         struct mld_msg *mld;
1302         struct net_bridge_mdb_entry *mp;
1303         struct mld2_query *mld2q;
1304         struct net_bridge_port_group *p;
1305         struct net_bridge_port_group __rcu **pp;
1306         struct br_ip saddr;
1307         unsigned long max_delay;
1308         unsigned long now = jiffies;
1309         unsigned int offset = skb_transport_offset(skb);
1310         const struct in6_addr *group = NULL;
1311         bool is_general_query;
1312         int err = 0;
1313
1314         spin_lock(&br->multicast_lock);
1315         if (!netif_running(br->dev) ||
1316             (port && port->state == BR_STATE_DISABLED))
1317                 goto out;
1318
1319         if (transport_len == sizeof(*mld)) {
1320                 if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
1321                         err = -EINVAL;
1322                         goto out;
1323                 }
1324                 mld = (struct mld_msg *) icmp6_hdr(skb);
1325                 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
1326                 if (max_delay)
1327                         group = &mld->mld_mca;
1328         } else {
1329                 if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
1330                         err = -EINVAL;
1331                         goto out;
1332                 }
1333                 mld2q = (struct mld2_query *)icmp6_hdr(skb);
1334                 if (!mld2q->mld2q_nsrcs)
1335                         group = &mld2q->mld2q_mca;
1336
1337                 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
1338         }
1339
1340         is_general_query = group && ipv6_addr_any(group);
1341
1342         if (is_general_query) {
1343                 saddr.proto = htons(ETH_P_IPV6);
1344                 saddr.u.ip6 = ip6h->saddr;
1345
1346                 br_multicast_query_received(br, port, &br->ip6_other_query,
1347                                             &saddr, max_delay);
1348                 goto out;
1349         } else if (!group) {
1350                 goto out;
1351         }
1352
1353         mp = br_mdb_ip6_get(br, group, vid);
1354         if (!mp)
1355                 goto out;
1356
1357         max_delay *= br->multicast_last_member_count;
1358         if (mp->host_joined &&
1359             (timer_pending(&mp->timer) ?
1360              time_after(mp->timer.expires, now + max_delay) :
1361              try_to_del_timer_sync(&mp->timer) >= 0))
1362                 mod_timer(&mp->timer, now + max_delay);
1363
1364         for (pp = &mp->ports;
1365              (p = mlock_dereference(*pp, br)) != NULL;
1366              pp = &p->next) {
1367                 if (timer_pending(&p->timer) ?
1368                     time_after(p->timer.expires, now + max_delay) :
1369                     try_to_del_timer_sync(&p->timer) >= 0)
1370                         mod_timer(&p->timer, now + max_delay);
1371         }
1372
1373 out:
1374         spin_unlock(&br->multicast_lock);
1375         return err;
1376 }
1377 #endif
1378
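/*
 * Handle an IGMP/MLD leave for @group. With fast-leave set on the port the
 * matching port group is removed immediately. Otherwise, provided no other
 * querier is active, a group-specific query is sent (when our own querier
 * is enabled) and the remaining membership timers are shortened to
 * last_member_count * last_member_interval.
 */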
1379 static void
1380 br_multicast_leave_group(struct net_bridge *br,
1381                          struct net_bridge_port *port,
1382                          struct br_ip *group,
1383                          struct bridge_mcast_other_query *other_query,
1384                          struct bridge_mcast_own_query *own_query,
1385                          const unsigned char *src)
1386 {
1387         struct net_bridge_mdb_entry *mp;
1388         struct net_bridge_port_group *p;
1389         unsigned long now;
1390         unsigned long time;
1391
1392         spin_lock(&br->multicast_lock);
1393         if (!netif_running(br->dev) ||
1394             (port && port->state == BR_STATE_DISABLED))
1395                 goto out;
1396
1397         mp = br_mdb_ip_get(br, group);
1398         if (!mp)
1399                 goto out;
1400
1401         if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1402                 struct net_bridge_port_group __rcu **pp;
1403
1404                 for (pp = &mp->ports;
1405                      (p = mlock_dereference(*pp, br)) != NULL;
1406                      pp = &p->next) {
1407                         if (!br_port_group_equal(p, port, src))
1408                                 continue;
1409
1410                         rcu_assign_pointer(*pp, p->next);
1411                         hlist_del_init(&p->mglist);
1412                         del_timer(&p->timer);
1413                         kfree_rcu(p, rcu);
1414                         br_mdb_notify(br->dev, port, group, RTM_DELMDB,
1415                                       p->flags);
1416
1417                         if (!mp->ports && !mp->host_joined &&
1418                             netif_running(br->dev))
1419                                 mod_timer(&mp->timer, jiffies);
1420                 }
1421                 goto out;
1422         }
1423
1424         if (timer_pending(&other_query->timer))
1425                 goto out;
1426
1427         if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
1428                 __br_multicast_send_query(br, port, &mp->addr);
1429
1430                 time = jiffies + br->multicast_last_member_count *
1431                                  br->multicast_last_member_interval;
1432
1433                 mod_timer(&own_query->timer, time);
1434
1435                 for (p = mlock_dereference(mp->ports, br);
1436                      p != NULL;
1437                      p = mlock_dereference(p->next, br)) {
1438                         if (!br_port_group_equal(p, port, src))
1439                                 continue;
1440
1441                         if (!hlist_unhashed(&p->mglist) &&
1442                             (timer_pending(&p->timer) ?
1443                              time_after(p->timer.expires, time) :
1444                              try_to_del_timer_sync(&p->timer) >= 0)) {
1445                                 mod_timer(&p->timer, time);
1446                         }
1447
1448                         break;
1449                 }
1450         }
1451
1452         now = jiffies;
1453         time = now + br->multicast_last_member_count *
1454                      br->multicast_last_member_interval;
1455
1456         if (!port) {
1457                 if (mp->host_joined &&
1458                     (timer_pending(&mp->timer) ?
1459                      time_after(mp->timer.expires, time) :
1460                      try_to_del_timer_sync(&mp->timer) >= 0)) {
1461                         mod_timer(&mp->timer, time);
1462                 }
1463
1464                 goto out;
1465         }
1466
1467         for (p = mlock_dereference(mp->ports, br);
1468              p != NULL;
1469              p = mlock_dereference(p->next, br)) {
1470                 if (p->port != port)
1471                         continue;
1472
1473                 if (!hlist_unhashed(&p->mglist) &&
1474                     (timer_pending(&p->timer) ?
1475                      time_after(p->timer.expires, time) :
1476                      try_to_del_timer_sync(&p->timer) >= 0)) {
1477                         mod_timer(&p->timer, time);
1478                 }
1479
1480                 break;
1481         }
1482 out:
1483         spin_unlock(&br->multicast_lock);
1484 }
1485
1486 static void br_ip4_multicast_leave_group(struct net_bridge *br,
1487                                          struct net_bridge_port *port,
1488                                          __be32 group,
1489                                          __u16 vid,
1490                                          const unsigned char *src)
1491 {
1492         struct br_ip br_group;
1493         struct bridge_mcast_own_query *own_query;
1494
1495         if (ipv4_is_local_multicast(group))
1496                 return;
1497
1498         own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
1499
1500         br_group.u.ip4 = group;
1501         br_group.proto = htons(ETH_P_IP);
1502         br_group.vid = vid;
1503
1504         br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
1505                                  own_query, src);
1506 }
1507
1508 #if IS_ENABLED(CONFIG_IPV6)
1509 static void br_ip6_multicast_leave_group(struct net_bridge *br,
1510                                          struct net_bridge_port *port,
1511                                          const struct in6_addr *group,
1512                                          __u16 vid,
1513                                          const unsigned char *src)
1514 {
1515         struct br_ip br_group;
1516         struct bridge_mcast_own_query *own_query;
1517
1518         if (ipv6_addr_is_ll_all_nodes(group))
1519                 return;
1520
1521         own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
1522
1523         br_group.u.ip6 = *group;
1524         br_group.proto = htons(ETH_P_IPV6);
1525         br_group.vid = vid;
1526
1527         br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
1528                                  own_query, src);
1529 }
1530 #endif
1531
1532 static void br_multicast_err_count(const struct net_bridge *br,
1533                                    const struct net_bridge_port *p,
1534                                    __be16 proto)
1535 {
1536         struct bridge_mcast_stats __percpu *stats;
1537         struct bridge_mcast_stats *pstats;
1538
1539         if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
1540                 return;
1541
1542         if (p)
1543                 stats = p->mcast_stats;
1544         else
1545                 stats = br->mcast_stats;
1546         if (WARN_ON(!stats))
1547                 return;
1548
1549         pstats = this_cpu_ptr(stats);
1550
1551         u64_stats_update_begin(&pstats->syncp);
1552         switch (proto) {
1553         case htons(ETH_P_IP):
1554                 pstats->mstats.igmp_parse_errors++;
1555                 break;
1556 #if IS_ENABLED(CONFIG_IPV6)
1557         case htons(ETH_P_IPV6):
1558                 pstats->mstats.mld_parse_errors++;
1559                 break;
1560 #endif
1561         }
1562         u64_stats_update_end(&pstats->syncp);
1563 }
1564
1565 static void br_multicast_pim(struct net_bridge *br,
1566                              struct net_bridge_port *port,
1567                              const struct sk_buff *skb)
1568 {
1569         unsigned int offset = skb_transport_offset(skb);
1570         struct pimhdr *pimhdr, _pimhdr;
1571
1572         pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
1573         if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
1574             pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
1575                 return;
1576
1577         br_multicast_mark_router(br, port);
1578 }
1579
1580 static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
1581                                     struct net_bridge_port *port,
1582                                     struct sk_buff *skb)
1583 {
1584         if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
1585             igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
1586                 return -ENOMSG;
1587
1588         br_multicast_mark_router(br, port);
1589
1590         return 0;
1591 }
1592
1593 static int br_multicast_ipv4_rcv(struct net_bridge *br,
1594                                  struct net_bridge_port *port,
1595                                  struct sk_buff *skb,
1596                                  u16 vid)
1597 {
1598         const unsigned char *src;
1599         struct igmphdr *ih;
1600         int err;
1601
1602         err = ip_mc_check_igmp(skb);
1603
1604         if (err == -ENOMSG) {
1605                 if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
1606                         BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1607                 } else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
1608                         if (ip_hdr(skb)->protocol == IPPROTO_PIM)
1609                                 br_multicast_pim(br, port, skb);
1610                 } else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
1611                         br_ip4_multicast_mrd_rcv(br, port, skb);
1612                 }
1613
1614                 return 0;
1615         } else if (err < 0) {
1616                 br_multicast_err_count(br, port, skb->protocol);
1617                 return err;
1618         }
1619
1620         ih = igmp_hdr(skb);
1621         src = eth_hdr(skb)->h_source;
1622         BR_INPUT_SKB_CB(skb)->igmp = ih->type;
1623
1624         switch (ih->type) {
1625         case IGMP_HOST_MEMBERSHIP_REPORT:
1626         case IGMPV2_HOST_MEMBERSHIP_REPORT:
1627                 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1628                 err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
1629                 break;
1630         case IGMPV3_HOST_MEMBERSHIP_REPORT:
1631                 err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
1632                 break;
1633         case IGMP_HOST_MEMBERSHIP_QUERY:
1634                 br_ip4_multicast_query(br, port, skb, vid);
1635                 break;
1636         case IGMP_HOST_LEAVE_MESSAGE:
1637                 br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
1638                 break;
1639         }
1640
1641         br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
1642                            BR_MCAST_DIR_RX);
1643
1644         return err;
1645 }
1646
1647 #if IS_ENABLED(CONFIG_IPV6)
1648 static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
1649                                     struct net_bridge_port *port,
1650                                     struct sk_buff *skb)
1651 {
1652         int ret;
1653
1654         if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
1655                 return -ENOMSG;
1656
1657         ret = ipv6_mc_check_icmpv6(skb);
1658         if (ret < 0)
1659                 return ret;
1660
1661         if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
1662                 return -ENOMSG;
1663
1664         br_multicast_mark_router(br, port);
1665
1666         return 0;
1667 }
1668
1669 static int br_multicast_ipv6_rcv(struct net_bridge *br,
1670                                  struct net_bridge_port *port,
1671                                  struct sk_buff *skb,
1672                                  u16 vid)
1673 {
1674         const unsigned char *src;
1675         struct mld_msg *mld;
1676         int err;
1677
1678         err = ipv6_mc_check_mld(skb);
1679
1680         if (err == -ENOMSG) {
1681                 if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
1682                         BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1683
1684                 if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
1685                         err = br_ip6_multicast_mrd_rcv(br, port, skb);
1686
1687                         if (err < 0 && err != -ENOMSG) {
1688                                 br_multicast_err_count(br, port, skb->protocol);
1689                                 return err;
1690                         }
1691                 }
1692
1693                 return 0;
1694         } else if (err < 0) {
1695                 br_multicast_err_count(br, port, skb->protocol);
1696                 return err;
1697         }
1698
1699         mld = (struct mld_msg *)skb_transport_header(skb);
1700         BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;
1701
1702         switch (mld->mld_type) {
1703         case ICMPV6_MGM_REPORT:
1704                 src = eth_hdr(skb)->h_source;
1705                 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1706                 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
1707                                                  src);
1708                 break;
1709         case ICMPV6_MLD2_REPORT:
1710                 err = br_ip6_multicast_mld2_report(br, port, skb, vid);
1711                 break;
1712         case ICMPV6_MGM_QUERY:
1713                 err = br_ip6_multicast_query(br, port, skb, vid);
1714                 break;
1715         case ICMPV6_MGM_REDUCTION:
1716                 src = eth_hdr(skb)->h_source;
1717                 br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
1718                 break;
1719         }
1720
1721         br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
1722                            BR_MCAST_DIR_RX);
1723
1724         return err;
1725 }
1726 #endif
1727
1728 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1729                      struct sk_buff *skb, u16 vid)
1730 {
1731         int ret = 0;
1732
1733         BR_INPUT_SKB_CB(skb)->igmp = 0;
1734         BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
1735
1736         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
1737                 return 0;
1738
1739         switch (skb->protocol) {
1740         case htons(ETH_P_IP):
1741                 ret = br_multicast_ipv4_rcv(br, port, skb, vid);
1742                 break;
1743 #if IS_ENABLED(CONFIG_IPV6)
1744         case htons(ETH_P_IPV6):
1745                 ret = br_multicast_ipv6_rcv(br, port, skb, vid);
1746                 break;
1747 #endif
1748         }
1749
1750         return ret;
1751 }
1752
1753 static void br_multicast_query_expired(struct net_bridge *br,
1754                                        struct bridge_mcast_own_query *query,
1755                                        struct bridge_mcast_querier *querier)
1756 {
1757         spin_lock(&br->multicast_lock);
1758         if (query->startup_sent < br->multicast_startup_query_count)
1759                 query->startup_sent++;
1760
1761         RCU_INIT_POINTER(querier->port, NULL);
1762         br_multicast_send_query(br, NULL, query);
1763         spin_unlock(&br->multicast_lock);
1764 }
1765
1766 static void br_ip4_multicast_query_expired(struct timer_list *t)
1767 {
1768         struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);
1769
1770         br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
1771 }
1772
1773 #if IS_ENABLED(CONFIG_IPV6)
1774 static void br_ip6_multicast_query_expired(struct timer_list *t)
1775 {
1776         struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);
1777
1778         br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
1779 }
1780 #endif
1781
1782 void br_multicast_init(struct net_bridge *br)
1783 {
1784         br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;
1785
1786         br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
1787         br->multicast_last_member_count = 2;
1788         br->multicast_startup_query_count = 2;
1789
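        /* timer defaults follow the IGMPv2/MLDv1 protocol defaults
         * (RFC 2236 / RFC 2710): 125s query interval, 260s membership interval
         */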
1790         br->multicast_last_member_interval = HZ;
1791         br->multicast_query_response_interval = 10 * HZ;
1792         br->multicast_startup_query_interval = 125 * HZ / 4;
1793         br->multicast_query_interval = 125 * HZ;
1794         br->multicast_querier_interval = 255 * HZ;
1795         br->multicast_membership_interval = 260 * HZ;
1796
1797         br->ip4_other_query.delay_time = 0;
1798         br->ip4_querier.port = NULL;
1799         br->multicast_igmp_version = 2;
1800 #if IS_ENABLED(CONFIG_IPV6)
1801         br->multicast_mld_version = 1;
1802         br->ip6_other_query.delay_time = 0;
1803         br->ip6_querier.port = NULL;
1804 #endif
1805         br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
1806         br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
1807
1808         spin_lock_init(&br->multicast_lock);
1809         timer_setup(&br->multicast_router_timer,
1810                     br_multicast_local_router_expired, 0);
1811         timer_setup(&br->ip4_other_query.timer,
1812                     br_ip4_multicast_querier_expired, 0);
1813         timer_setup(&br->ip4_own_query.timer,
1814                     br_ip4_multicast_query_expired, 0);
1815 #if IS_ENABLED(CONFIG_IPV6)
1816         timer_setup(&br->ip6_other_query.timer,
1817                     br_ip6_multicast_querier_expired, 0);
1818         timer_setup(&br->ip6_own_query.timer,
1819                     br_ip6_multicast_query_expired, 0);
1820 #endif
1821         INIT_HLIST_HEAD(&br->mdb_list);
1822 }
1823
1824 static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
1825 {
1826         struct in_device *in_dev = in_dev_get(br->dev);
1827
1828         if (!in_dev)
1829                 return;
1830
1831         __ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
1832         in_dev_put(in_dev);
1833 }
1834
1835 #if IS_ENABLED(CONFIG_IPV6)
1836 static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
1837 {
1838         struct in6_addr addr;
1839
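        /* ff02::6a is the link-local all-snoopers address (RFC 4286) */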
1840         ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
1841         ipv6_dev_mc_inc(br->dev, &addr);
1842 }
1843 #else
1844 static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
1845 {
1846 }
1847 #endif
1848
1849 static void br_multicast_join_snoopers(struct net_bridge *br)
1850 {
1851         br_ip4_multicast_join_snoopers(br);
1852         br_ip6_multicast_join_snoopers(br);
1853 }
1854
1855 static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
1856 {
1857         struct in_device *in_dev = in_dev_get(br->dev);
1858
1859         if (WARN_ON(!in_dev))
1860                 return;
1861
1862         __ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
1863         in_dev_put(in_dev);
1864 }
1865
1866 #if IS_ENABLED(CONFIG_IPV6)
1867 static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
1868 {
1869         struct in6_addr addr;
1870
1871         ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
1872         ipv6_dev_mc_dec(br->dev, &addr);
1873 }
1874 #else
1875 static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
1876 {
1877 }
1878 #endif
1879
1880 static void br_multicast_leave_snoopers(struct net_bridge *br)
1881 {
1882         br_ip4_multicast_leave_snoopers(br);
1883         br_ip6_multicast_leave_snoopers(br);
1884 }
1885
1886 static void __br_multicast_open(struct net_bridge *br,
1887                                 struct bridge_mcast_own_query *query)
1888 {
1889         query->startup_sent = 0;
1890
1891         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
1892                 return;
1893
1894         mod_timer(&query->timer, jiffies);
1895 }
1896
1897 void br_multicast_open(struct net_bridge *br)
1898 {
1899         if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
1900                 br_multicast_join_snoopers(br);
1901
1902         __br_multicast_open(br, &br->ip4_own_query);
1903 #if IS_ENABLED(CONFIG_IPV6)
1904         __br_multicast_open(br, &br->ip6_own_query);
1905 #endif
1906 }
1907
1908 void br_multicast_stop(struct net_bridge *br)
1909 {
1910         del_timer_sync(&br->multicast_router_timer);
1911         del_timer_sync(&br->ip4_other_query.timer);
1912         del_timer_sync(&br->ip4_own_query.timer);
1913 #if IS_ENABLED(CONFIG_IPV6)
1914         del_timer_sync(&br->ip6_other_query.timer);
1915         del_timer_sync(&br->ip6_own_query.timer);
1916 #endif
1917
1918         if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
1919                 br_multicast_leave_snoopers(br);
1920 }
1921
1922 void br_multicast_dev_del(struct net_bridge *br)
1923 {
1924         struct net_bridge_mdb_entry *mp;
1925         struct hlist_node *tmp;
1926
1927         spin_lock_bh(&br->multicast_lock);
1928         hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) {
1929                 del_timer(&mp->timer);
1930                 rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
1931                                        br_mdb_rht_params);
1932                 hlist_del_rcu(&mp->mdb_node);
1933                 kfree_rcu(mp, rcu);
1934         }
1935         spin_unlock_bh(&br->multicast_lock);
1936
1937         rcu_barrier();
1938 }
1939
1940 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
1941 {
1942         int err = -EINVAL;
1943
1944         spin_lock_bh(&br->multicast_lock);
1945
1946         switch (val) {
1947         case MDB_RTR_TYPE_DISABLED:
1948         case MDB_RTR_TYPE_PERM:
1949                 br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
1950                 del_timer(&br->multicast_router_timer);
1951                 br->multicast_router = val;
1952                 err = 0;
1953                 break;
1954         case MDB_RTR_TYPE_TEMP_QUERY:
1955                 if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
1956                         br_mc_router_state_change(br, false);
1957                 br->multicast_router = val;
1958                 err = 0;
1959                 break;
1960         }
1961
1962         spin_unlock_bh(&br->multicast_lock);
1963
1964         return err;
1965 }
1966
1967 static void __del_port_router(struct net_bridge_port *p)
1968 {
1969         if (hlist_unhashed(&p->rlist))
1970                 return;
1971         hlist_del_init_rcu(&p->rlist);
1972         br_rtr_notify(p->br->dev, p, RTM_DELMDB);
1973         br_port_mc_router_state_change(p, false);
1974
1975         /* don't allow timer refresh */
1976         if (p->multicast_router == MDB_RTR_TYPE_TEMP)
1977                 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
1978 }
1979
1980 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
1981 {
1982         struct net_bridge *br = p->br;
1983         unsigned long now = jiffies;
1984         int err = -EINVAL;
1985
1986         spin_lock(&br->multicast_lock);
1987         if (p->multicast_router == val) {
1988                 /* Refresh the temp router port timer */
1989                 if (p->multicast_router == MDB_RTR_TYPE_TEMP)
1990                         mod_timer(&p->multicast_router_timer,
1991                                   now + br->multicast_querier_interval);
1992                 err = 0;
1993                 goto unlock;
1994         }
1995         switch (val) {
1996         case MDB_RTR_TYPE_DISABLED:
1997                 p->multicast_router = MDB_RTR_TYPE_DISABLED;
1998                 __del_port_router(p);
1999                 del_timer(&p->multicast_router_timer);
2000                 break;
2001         case MDB_RTR_TYPE_TEMP_QUERY:
2002                 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
2003                 __del_port_router(p);
2004                 break;
2005         case MDB_RTR_TYPE_PERM:
2006                 p->multicast_router = MDB_RTR_TYPE_PERM;
2007                 del_timer(&p->multicast_router_timer);
2008                 br_multicast_add_router(br, p);
2009                 break;
2010         case MDB_RTR_TYPE_TEMP:
2011                 p->multicast_router = MDB_RTR_TYPE_TEMP;
2012                 br_multicast_mark_router(br, p);
2013                 break;
2014         default:
2015                 goto unlock;
2016         }
2017         err = 0;
2018 unlock:
2019         spin_unlock(&br->multicast_lock);
2020
2021         return err;
2022 }
2023
2024 static void br_multicast_start_querier(struct net_bridge *br,
2025                                        struct bridge_mcast_own_query *query)
2026 {
2027         struct net_bridge_port *port;
2028
2029         __br_multicast_open(br, query);
2030
2031         list_for_each_entry(port, &br->port_list, list) {
2032                 if (port->state == BR_STATE_DISABLED ||
2033                     port->state == BR_STATE_BLOCKING)
2034                         continue;
2035
2036                 if (query == &br->ip4_own_query)
2037                         br_multicast_enable(&port->ip4_own_query);
2038 #if IS_ENABLED(CONFIG_IPV6)
2039                 else
2040                         br_multicast_enable(&port->ip6_own_query);
2041 #endif
2042         }
2043 }
2044
2045 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
2046 {
2047         struct net_bridge_port *port;
2048
2049         spin_lock_bh(&br->multicast_lock);
2050         if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
2051                 goto unlock;
2052
2053         br_mc_disabled_update(br->dev, val);
2054         br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
2055         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
2056                 br_multicast_leave_snoopers(br);
2057                 goto unlock;
2058         }
2059
2060         if (!netif_running(br->dev))
2061                 goto unlock;
2062
2063         br_multicast_open(br);
2064         list_for_each_entry(port, &br->port_list, list)
2065                 __br_multicast_enable_port(port);
2066
2067 unlock:
2068         spin_unlock_bh(&br->multicast_lock);
2069
2070         return 0;
2071 }
2072
2073 bool br_multicast_enabled(const struct net_device *dev)
2074 {
2075         struct net_bridge *br = netdev_priv(dev);
2076
2077         return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
2078 }
2079 EXPORT_SYMBOL_GPL(br_multicast_enabled);
2080
2081 bool br_multicast_router(const struct net_device *dev)
2082 {
2083         struct net_bridge *br = netdev_priv(dev);
2084         bool is_router;
2085
2086         spin_lock_bh(&br->multicast_lock);
2087         is_router = br_multicast_is_router(br);
2088         spin_unlock_bh(&br->multicast_lock);
2089         return is_router;
2090 }
2091 EXPORT_SYMBOL_GPL(br_multicast_router);
2092
2093 int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
2094 {
2095         unsigned long max_delay;
2096
2097         val = !!val;
2098
2099         spin_lock_bh(&br->multicast_lock);
2100         if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
2101                 goto unlock;
2102
2103         br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
2104         if (!val)
2105                 goto unlock;
2106
2107         max_delay = br->multicast_query_response_interval;
2108
2109         if (!timer_pending(&br->ip4_other_query.timer))
2110                 br->ip4_other_query.delay_time = jiffies + max_delay;
2111
2112         br_multicast_start_querier(br, &br->ip4_own_query);
2113
2114 #if IS_ENABLED(CONFIG_IPV6)
2115         if (!timer_pending(&br->ip6_other_query.timer))
2116                 br->ip6_other_query.delay_time = jiffies + max_delay;
2117
2118         br_multicast_start_querier(br, &br->ip6_own_query);
2119 #endif
2120
2121 unlock:
2122         spin_unlock_bh(&br->multicast_lock);
2123
2124         return 0;
2125 }
2126
2127 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
2128 {
2129         /* Currently we support only versions 2 and 3 */
2130         switch (val) {
2131         case 2:
2132         case 3:
2133                 break;
2134         default:
2135                 return -EINVAL;
2136         }
2137
2138         spin_lock_bh(&br->multicast_lock);
2139         br->multicast_igmp_version = val;
2140         spin_unlock_bh(&br->multicast_lock);
2141
2142         return 0;
2143 }
2144
2145 #if IS_ENABLED(CONFIG_IPV6)
2146 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
2147 {
2148         /* Currently we support only versions 1 and 2 */
2149         switch (val) {
2150         case 1:
2151         case 2:
2152                 break;
2153         default:
2154                 return -EINVAL;
2155         }
2156
2157         spin_lock_bh(&br->multicast_lock);
2158         br->multicast_mld_version = val;
2159         spin_unlock_bh(&br->multicast_lock);
2160
2161         return 0;
2162 }
2163 #endif
2164
2165 /**
2166  * br_multicast_list_adjacent - Returns snooped multicast addresses
2167  * @dev:        The bridge port adjacent to which to retrieve addresses
2168  * @br_ip_list: The list to store found, snooped multicast IP addresses in
2169  *
2170  * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
2171  * snooping feature on all bridge ports of dev's bridge device, excluding
2172  * the addresses from dev itself.
2173  *
2174  * Returns the number of items added to br_ip_list.
2175  *
2176  * Notes:
2177  * - br_ip_list needs to be initialized by the caller
2178  * - br_ip_list may contain duplicates in the end
2179  *   (deduplication is up to the caller)
2180  * - br_ip_list entries need to be freed by the caller
2181  */
2182 int br_multicast_list_adjacent(struct net_device *dev,
2183                                struct list_head *br_ip_list)
2184 {
2185         struct net_bridge *br;
2186         struct net_bridge_port *port;
2187         struct net_bridge_port_group *group;
2188         struct br_ip_list *entry;
2189         int count = 0;
2190
2191         rcu_read_lock();
2192         if (!br_ip_list || !br_port_exists(dev))
2193                 goto unlock;
2194
2195         port = br_port_get_rcu(dev);
2196         if (!port || !port->br)
2197                 goto unlock;
2198
2199         br = port->br;
2200
2201         list_for_each_entry_rcu(port, &br->port_list, list) {
2202                 if (!port->dev || port->dev == dev)
2203                         continue;
2204
2205                 hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
2206                         entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
2207                         if (!entry)
2208                                 goto unlock;
2209
2210                         entry->addr = group->addr;
2211                         list_add(&entry->list, br_ip_list);
2212                         count++;
2213                 }
2214         }
2215
2216 unlock:
2217         rcu_read_unlock();
2218         return count;
2219 }
2220 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
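
/*
 * Illustrative sketch only, not part of this file: how a caller such as a
 * lower-level port driver might consume br_multicast_list_adjacent(). The
 * function and device names below are made up for the example; the caller
 * owns the list and must free every entry it gets back.
 */
#if 0   /* example only */
static void example_show_adjacent_groups(struct net_device *port_dev)
{
        struct br_ip_list *entry, *tmp;
        LIST_HEAD(mc_list);
        int count;

        count = br_multicast_list_adjacent(port_dev, &mc_list);
        pr_debug("%s: %d snooped group(s) on other bridge ports\n",
                 port_dev->name, count);

        /* the list may contain duplicates; free all entries when done */
        list_for_each_entry_safe(entry, tmp, &mc_list, list) {
                list_del(&entry->list);
                kfree(entry);
        }
}
#endif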
2221
2222 /**
2223  * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
2224  * @dev: The bridge port providing the bridge on which to check for a querier
2225  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2226  *
2227  * Checks whether the given interface has a bridge on top and if so returns
2228  * true if a valid querier exists anywhere on the bridged link layer.
2229  * Otherwise returns false.
2230  */
2231 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
2232 {
2233         struct net_bridge *br;
2234         struct net_bridge_port *port;
2235         struct ethhdr eth;
2236         bool ret = false;
2237
2238         rcu_read_lock();
2239         if (!br_port_exists(dev))
2240                 goto unlock;
2241
2242         port = br_port_get_rcu(dev);
2243         if (!port || !port->br)
2244                 goto unlock;
2245
2246         br = port->br;
2247
2248         memset(&eth, 0, sizeof(eth));
2249         eth.h_proto = htons(proto);
2250
2251         ret = br_multicast_querier_exists(br, &eth);
2252
2253 unlock:
2254         rcu_read_unlock();
2255         return ret;
2256 }
2257 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
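
/*
 * Illustrative sketch only, not part of this file: a driver on a bridge
 * port ("port_dev" is an assumed variable) checking whether any valid
 * IGMP or MLD querier is active on the bridged segment before relying on
 * snooped multicast state.
 */
#if 0   /* example only */
        bool have_querier =
                br_multicast_has_querier_anywhere(port_dev, ETH_P_IP) ||
                br_multicast_has_querier_anywhere(port_dev, ETH_P_IPV6);
#endif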
2258
2259 /**
2260  * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
2261  * @dev: The bridge port adjacent to which to check for a querier
2262  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2263  *
2264  * Checks whether the given interface has a bridge on top and if so returns
2265  * true if a selected querier is behind one of the other ports of this
2266  * bridge. Otherwise returns false.
2267  */
2268 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
2269 {
2270         struct net_bridge *br;
2271         struct net_bridge_port *port;
2272         bool ret = false;
2273
2274         rcu_read_lock();
2275         if (!br_port_exists(dev))
2276                 goto unlock;
2277
2278         port = br_port_get_rcu(dev);
2279         if (!port || !port->br)
2280                 goto unlock;
2281
2282         br = port->br;
2283
2284         switch (proto) {
2285         case ETH_P_IP:
2286                 if (!timer_pending(&br->ip4_other_query.timer) ||
2287                     rcu_dereference(br->ip4_querier.port) == port)
2288                         goto unlock;
2289                 break;
2290 #if IS_ENABLED(CONFIG_IPV6)
2291         case ETH_P_IPV6:
2292                 if (!timer_pending(&br->ip6_other_query.timer) ||
2293                     rcu_dereference(br->ip6_querier.port) == port)
2294                         goto unlock;
2295                 break;
2296 #endif
2297         default:
2298                 goto unlock;
2299         }
2300
2301         ret = true;
2302 unlock:
2303         rcu_read_unlock();
2304         return ret;
2305 }
2306 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
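
/*
 * Illustrative sketch only, not part of this file: checking whether the
 * selected querier sits behind one of the other ports of the bridge on
 * top of "port_dev" (an assumed variable) for a given protocol family.
 */
#if 0   /* example only */
        if (br_multicast_has_querier_adjacent(port_dev, ETH_P_IP))
                pr_debug("%s: IGMP querier behind another bridge port\n",
                         port_dev->name);
#endif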
2307
2308 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
2309                                const struct sk_buff *skb, u8 type, u8 dir)
2310 {
2311         struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
2312         __be16 proto = skb->protocol;
2313         unsigned int t_len;
2314
2315         u64_stats_update_begin(&pstats->syncp);
2316         switch (proto) {
2317         case htons(ETH_P_IP):
2318                 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
2319                 switch (type) {
2320                 case IGMP_HOST_MEMBERSHIP_REPORT:
2321                         pstats->mstats.igmp_v1reports[dir]++;
2322                         break;
2323                 case IGMPV2_HOST_MEMBERSHIP_REPORT:
2324                         pstats->mstats.igmp_v2reports[dir]++;
2325                         break;
2326                 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2327                         pstats->mstats.igmp_v3reports[dir]++;
2328                         break;
2329                 case IGMP_HOST_MEMBERSHIP_QUERY:
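                        /* v3 queries are longer than struct igmphdr;
                         * a v1 query has a zero max resp code
                         */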
2330                         if (t_len != sizeof(struct igmphdr)) {
2331                                 pstats->mstats.igmp_v3queries[dir]++;
2332                         } else {
2333                                 unsigned int offset = skb_transport_offset(skb);
2334                                 struct igmphdr *ih, _ihdr;
2335
2336                                 ih = skb_header_pointer(skb, offset,
2337                                                         sizeof(_ihdr), &_ihdr);
2338                                 if (!ih)
2339                                         break;
2340                                 if (!ih->code)
2341                                         pstats->mstats.igmp_v1queries[dir]++;
2342                                 else
2343                                         pstats->mstats.igmp_v2queries[dir]++;
2344                         }
2345                         break;
2346                 case IGMP_HOST_LEAVE_MESSAGE:
2347                         pstats->mstats.igmp_leaves[dir]++;
2348                         break;
2349                 }
2350                 break;
2351 #if IS_ENABLED(CONFIG_IPV6)
2352         case htons(ETH_P_IPV6):
2353                 t_len = ntohs(ipv6_hdr(skb)->payload_len) +
2354                         sizeof(struct ipv6hdr);
2355                 t_len -= skb_network_header_len(skb);
2356                 switch (type) {
2357                 case ICMPV6_MGM_REPORT:
2358                         pstats->mstats.mld_v1reports[dir]++;
2359                         break;
2360                 case ICMPV6_MLD2_REPORT:
2361                         pstats->mstats.mld_v2reports[dir]++;
2362                         break;
2363                 case ICMPV6_MGM_QUERY:
2364                         if (t_len != sizeof(struct mld_msg))
2365                                 pstats->mstats.mld_v2queries[dir]++;
2366                         else
2367                                 pstats->mstats.mld_v1queries[dir]++;
2368                         break;
2369                 case ICMPV6_MGM_REDUCTION:
2370                         pstats->mstats.mld_leaves[dir]++;
2371                         break;
2372                 }
2373                 break;
2374 #endif /* CONFIG_IPV6 */
2375         }
2376         u64_stats_update_end(&pstats->syncp);
2377 }
2378
2379 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
2380                         const struct sk_buff *skb, u8 type, u8 dir)
2381 {
2382         struct bridge_mcast_stats __percpu *stats;
2383
2384         /* if multicast snooping is disabled then igmp type can't be set */
2385         if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
2386                 return;
2387
2388         if (p)
2389                 stats = p->mcast_stats;
2390         else
2391                 stats = br->mcast_stats;
2392         if (WARN_ON(!stats))
2393                 return;
2394
2395         br_mcast_stats_add(stats, skb, type, dir);
2396 }
2397
2398 int br_multicast_init_stats(struct net_bridge *br)
2399 {
2400         br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
2401         if (!br->mcast_stats)
2402                 return -ENOMEM;
2403
2404         return 0;
2405 }
2406
2407 void br_multicast_uninit_stats(struct net_bridge *br)
2408 {
2409         free_percpu(br->mcast_stats);
2410 }
2411
2412 static void mcast_stats_add_dir(u64 *dst, u64 *src)
2413 {
2414         dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
2415         dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
2416 }
2417
2418 void br_multicast_get_stats(const struct net_bridge *br,
2419                             const struct net_bridge_port *p,
2420                             struct br_mcast_stats *dest)
2421 {
2422         struct bridge_mcast_stats __percpu *stats;
2423         struct br_mcast_stats tdst;
2424         int i;
2425
2426         memset(dest, 0, sizeof(*dest));
2427         if (p)
2428                 stats = p->mcast_stats;
2429         else
2430                 stats = br->mcast_stats;
2431         if (WARN_ON(!stats))
2432                 return;
2433
2434         memset(&tdst, 0, sizeof(tdst));
2435         for_each_possible_cpu(i) {
2436                 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
2437                 struct br_mcast_stats temp;
2438                 unsigned int start;
2439
2440                 do {
2441                         start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
2442                         memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
2443                 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
2444
2445                 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
2446                 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
2447                 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
2448                 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
2449                 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
2450                 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
2451                 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
2452                 tdst.igmp_parse_errors += temp.igmp_parse_errors;
2453
2454                 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
2455                 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
2456                 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
2457                 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
2458                 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
2459                 tdst.mld_parse_errors += temp.mld_parse_errors;
2460         }
2461         memcpy(dest, &tdst, sizeof(*dest));
2462 }
2463
2464 int br_mdb_hash_init(struct net_bridge *br)
2465 {
2466         return rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
2467 }
2468
2469 void br_mdb_hash_fini(struct net_bridge *br)
2470 {
2471         rhashtable_destroy(&br->mdb_hash_tbl);
2472 }