net/bridge/br_vlan.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/kernel.h>
3 #include <linux/netdevice.h>
4 #include <linux/rtnetlink.h>
5 #include <linux/slab.h>
6 #include <net/switchdev.h>
7
8 #include "br_private.h"
9 #include "br_private_tunnel.h"
10
11 static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
12
13 static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
14                               const void *ptr)
15 {
16         const struct net_bridge_vlan *vle = ptr;
17         u16 vid = *(u16 *)arg->key;
18
19         return vle->vid != vid;
20 }
21
22 static const struct rhashtable_params br_vlan_rht_params = {
23         .head_offset = offsetof(struct net_bridge_vlan, vnode),
24         .key_offset = offsetof(struct net_bridge_vlan, vid),
25         .key_len = sizeof(u16),
26         .nelem_hint = 3,
27         .max_size = VLAN_N_VID,
28         .obj_cmpfn = br_vlan_cmp,
29         .automatic_shrinking = true,
30 };
31
32 static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
33 {
34         return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
35 }
36
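/* Make vid the group's pvid; returns false if it already was the pvid */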
37 static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
38 {
39         if (vg->pvid == vid)
40                 return false;
41
42         smp_wmb();
43         vg->pvid = vid;
44
45         return true;
46 }
47
48 static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
49 {
50         if (vg->pvid != vid)
51                 return false;
52
53         smp_wmb();
54         vg->pvid = 0;
55
56         return true;
57 }
58
59 /* return true if anything changed, false otherwise */
60 static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
61 {
62         struct net_bridge_vlan_group *vg;
63         u16 old_flags = v->flags;
64         bool ret;
65
66         if (br_vlan_is_master(v))
67                 vg = br_vlan_group(v->br);
68         else
69                 vg = nbp_vlan_group(v->port);
70
71         if (flags & BRIDGE_VLAN_INFO_PVID)
72                 ret = __vlan_add_pvid(vg, v->vid);
73         else
74                 ret = __vlan_delete_pvid(vg, v->vid);
75
76         if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
77                 v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
78         else
79                 v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
80
81         return ret || !!(old_flags ^ v->flags);
82 }
83
84 static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
85                           struct net_bridge_vlan *v, u16 flags,
86                           struct netlink_ext_ack *extack)
87 {
88         int err;
89
90         /* Try switchdev op first. In case it is not supported, fallback to
91          * 8021q add.
92          */
93         err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
94         if (err == -EOPNOTSUPP)
95                 return vlan_vid_add(dev, br->vlan_proto, v->vid);
96         v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
97         return err;
98 }
99
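/* Insert the vlan into its group's list, keeping the list sorted by vid */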
100 static void __vlan_add_list(struct net_bridge_vlan *v)
101 {
102         struct net_bridge_vlan_group *vg;
103         struct list_head *headp, *hpos;
104         struct net_bridge_vlan *vent;
105
106         if (br_vlan_is_master(v))
107                 vg = br_vlan_group(v->br);
108         else
109                 vg = nbp_vlan_group(v->port);
110
111         headp = &vg->vlan_list;
112         list_for_each_prev(hpos, headp) {
113                 vent = list_entry(hpos, struct net_bridge_vlan, vlist);
114                 if (v->vid < vent->vid)
115                         continue;
116                 else
117                         break;
118         }
119         list_add_rcu(&v->vlist, hpos);
120 }
121
122 static void __vlan_del_list(struct net_bridge_vlan *v)
123 {
124         list_del_rcu(&v->vlist);
125 }
126
127 static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
128                           const struct net_bridge_vlan *v)
129 {
130         int err;
131
132         /* Try switchdev op first. In case it is not supported, fallback to
133          * 8021q del.
134          */
135         err = br_switchdev_port_vlan_del(dev, v->vid);
136         if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
137                 vlan_vid_del(dev, br->vlan_proto, v->vid);
138         return err == -EOPNOTSUPP ? 0 : err;
139 }
140
141 /* Returns a master vlan; if it didn't exist it gets created. In all cases
142  * a reference is taken to the master vlan before returning.
143  */
144 static struct net_bridge_vlan *
145 br_vlan_get_master(struct net_bridge *br, u16 vid,
146                    struct netlink_ext_ack *extack)
147 {
148         struct net_bridge_vlan_group *vg;
149         struct net_bridge_vlan *masterv;
150
151         vg = br_vlan_group(br);
152         masterv = br_vlan_find(vg, vid);
153         if (!masterv) {
154                 bool changed;
155
156                 /* missing global ctx, create it now */
157                 if (br_vlan_add(br, vid, 0, &changed, extack))
158                         return NULL;
159                 masterv = br_vlan_find(vg, vid);
160                 if (WARN_ON(!masterv))
161                         return NULL;
162                 refcount_set(&masterv->refcnt, 1);
163                 return masterv;
164         }
165         refcount_inc(&masterv->refcnt);
166
167         return masterv;
168 }
169
170 static void br_master_vlan_rcu_free(struct rcu_head *rcu)
171 {
172         struct net_bridge_vlan *v;
173
174         v = container_of(rcu, struct net_bridge_vlan, rcu);
175         WARN_ON(!br_vlan_is_master(v));
176         free_percpu(v->stats);
177         v->stats = NULL;
178         kfree(v);
179 }
180
181 static void br_vlan_put_master(struct net_bridge_vlan *masterv)
182 {
183         struct net_bridge_vlan_group *vg;
184
185         if (!br_vlan_is_master(masterv))
186                 return;
187
188         vg = br_vlan_group(masterv->br);
189         if (refcount_dec_and_test(&masterv->refcnt)) {
190                 rhashtable_remove_fast(&vg->vlan_hash,
191                                        &masterv->vnode, br_vlan_rht_params);
192                 __vlan_del_list(masterv);
193                 call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
194         }
195 }
196
197 static void nbp_vlan_rcu_free(struct rcu_head *rcu)
198 {
199         struct net_bridge_vlan *v;
200
201         v = container_of(rcu, struct net_bridge_vlan, rcu);
202         WARN_ON(br_vlan_is_master(v));
203         /* if we had per-port stats configured then free them here */
204         if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
205                 free_percpu(v->stats);
206         v->stats = NULL;
207         kfree(v);
208 }
209
210 /* This is the shared VLAN add function which works for both ports and bridge
211  * devices. There are four possible calls to this function in terms of the
212  * vlan entry type:
213  * 1. vlan is being added on a port (no master flags, global entry exists)
214  * 2. vlan is being added on a bridge (both master and brentry flags)
215  * 3. vlan is being added on a port, but a global entry didn't exist which
216  *    is being created right now (master flag set, brentry flag unset), the
217  *    global entry is used for global per-vlan features, but not for filtering
218  * 4. same as 3 but with both master and brentry flags set so the entry
219  *    will be used for filtering in both the port and the bridge
220  */
221 static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
222                       struct netlink_ext_ack *extack)
223 {
224         struct net_bridge_vlan *masterv = NULL;
225         struct net_bridge_port *p = NULL;
226         struct net_bridge_vlan_group *vg;
227         struct net_device *dev;
228         struct net_bridge *br;
229         int err;
230
231         if (br_vlan_is_master(v)) {
232                 br = v->br;
233                 dev = br->dev;
234                 vg = br_vlan_group(br);
235         } else {
236                 p = v->port;
237                 br = p->br;
238                 dev = p->dev;
239                 vg = nbp_vlan_group(p);
240         }
241
242         if (p) {
243                 /* Add VLAN to the device filter if it is supported.
244                  * This ensures tagged traffic enters the bridge when
245                  * promiscuous mode is disabled by br_manage_promisc().
246                  */
247                 err = __vlan_vid_add(dev, br, v, flags, extack);
248                 if (err)
249                         goto out;
250
251                 /* need to work on the master vlan too */
252                 if (flags & BRIDGE_VLAN_INFO_MASTER) {
253                         bool changed;
254
255                         err = br_vlan_add(br, v->vid,
256                                           flags | BRIDGE_VLAN_INFO_BRENTRY,
257                                           &changed, extack);
258                         if (err)
259                                 goto out_filt;
260                 }
261
262                 masterv = br_vlan_get_master(br, v->vid, extack);
263                 if (!masterv)
264                         goto out_filt;
265                 v->brvlan = masterv;
266                 if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
267                         v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
268                         if (!v->stats) {
269                                 err = -ENOMEM;
270                                 goto out_filt;
271                         }
272                         v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
273                 } else {
274                         v->stats = masterv->stats;
275                 }
276         } else {
277                 err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
278                 if (err && err != -EOPNOTSUPP)
279                         goto out;
280         }
281
282         /* Add the dev mac and count the vlan only if it's usable */
283         if (br_vlan_should_use(v)) {
284                 err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
285                 if (err) {
286                         br_err(br, "failed to insert local address into bridge forwarding table\n");
287                         goto out_filt;
288                 }
289                 vg->num_vlans++;
290         }
291
292         err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
293                                             br_vlan_rht_params);
294         if (err)
295                 goto out_fdb_insert;
296
297         __vlan_add_list(v);
298         __vlan_add_flags(v, flags);
299
300         if (p)
301                 nbp_vlan_set_vlan_dev_state(p, v->vid);
302 out:
303         return err;
304
305 out_fdb_insert:
306         if (br_vlan_should_use(v)) {
307                 br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
308                 vg->num_vlans--;
309         }
310
311 out_filt:
312         if (p) {
313                 __vlan_vid_del(dev, br, v);
314                 if (masterv) {
315                         if (v->stats && masterv->stats != v->stats)
316                                 free_percpu(v->stats);
317                         v->stats = NULL;
318
319                         br_vlan_put_master(masterv);
320                         v->brvlan = NULL;
321                 }
322         } else {
323                 br_switchdev_port_vlan_del(dev, v->vid);
324         }
325
326         goto out;
327 }
328
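/* Remove a vlan entry from its group and drop the reference held on the
 * master vlan (for a master entry this is the entry's own refcount).
 */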
329 static int __vlan_del(struct net_bridge_vlan *v)
330 {
331         struct net_bridge_vlan *masterv = v;
332         struct net_bridge_vlan_group *vg;
333         struct net_bridge_port *p = NULL;
334         int err = 0;
335
336         if (br_vlan_is_master(v)) {
337                 vg = br_vlan_group(v->br);
338         } else {
339                 p = v->port;
340                 vg = nbp_vlan_group(v->port);
341                 masterv = v->brvlan;
342         }
343
344         __vlan_delete_pvid(vg, v->vid);
345         if (p) {
346                 err = __vlan_vid_del(p->dev, p->br, v);
347                 if (err)
348                         goto out;
349         } else {
350                 err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
351                 if (err && err != -EOPNOTSUPP)
352                         goto out;
353                 err = 0;
354         }
355
356         if (br_vlan_should_use(v)) {
357                 v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
358                 vg->num_vlans--;
359         }
360
361         if (masterv != v) {
362                 vlan_tunnel_info_del(vg, v);
363                 rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
364                                        br_vlan_rht_params);
365                 __vlan_del_list(v);
366                 nbp_vlan_set_vlan_dev_state(p, v->vid);
367                 call_rcu(&v->rcu, nbp_vlan_rcu_free);
368         }
369
370         br_vlan_put_master(masterv);
371 out:
372         return err;
373 }
374
375 static void __vlan_group_free(struct net_bridge_vlan_group *vg)
376 {
377         WARN_ON(!list_empty(&vg->vlan_list));
378         rhashtable_destroy(&vg->vlan_hash);
379         vlan_tunnel_deinit(vg);
380         kfree(vg);
381 }
382
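/* Remove the group's pvid and delete all of its vlan entries */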
383 static void __vlan_flush(struct net_bridge_vlan_group *vg)
384 {
385         struct net_bridge_vlan *vlan, *tmp;
386
387         __vlan_delete_pvid(vg, vg->pvid);
388         list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
389                 __vlan_del(vlan);
390 }
391
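/* Egress vlan handling: update tx stats if enabled, strip the tag for vlans
 * marked untagged and apply the egress tunnel mapping on BR_VLAN_TUNNEL ports.
 */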
392 struct sk_buff *br_handle_vlan(struct net_bridge *br,
393                                const struct net_bridge_port *p,
394                                struct net_bridge_vlan_group *vg,
395                                struct sk_buff *skb)
396 {
397         struct br_vlan_stats *stats;
398         struct net_bridge_vlan *v;
399         u16 vid;
400
401         /* If this packet was not filtered at input, let it pass */
402         if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
403                 goto out;
404
405         /* At this point, we know that the frame was filtered and contains
406          * a valid vlan id.  If the vlan id has the untagged flag set,
407          * send untagged; otherwise, send tagged.
408          */
409         br_vlan_get_tag(skb, &vid);
410         v = br_vlan_find(vg, vid);
411         /* Vlan entry must be configured at this point.  The only
412          * exception is when the bridge is set in promisc mode and the
413          * packet is destined for the bridge device.  In this case,
414          * pass the packet as is.
415          */
416         if (!v || !br_vlan_should_use(v)) {
417                 if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
418                         goto out;
419                 } else {
420                         kfree_skb(skb);
421                         return NULL;
422                 }
423         }
424         if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
425                 stats = this_cpu_ptr(v->stats);
426                 u64_stats_update_begin(&stats->syncp);
427                 stats->tx_bytes += skb->len;
428                 stats->tx_packets++;
429                 u64_stats_update_end(&stats->syncp);
430         }
431
432         if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
433                 __vlan_hwaccel_clear_tag(skb);
434
435         if (p && (p->flags & BR_VLAN_TUNNEL) &&
436             br_handle_egress_vlan_tunnel(skb, v)) {
437                 kfree_skb(skb);
438                 return NULL;
439         }
440 out:
441         return skb;
442 }
443
444 /* Called under RCU */
445 static bool __allowed_ingress(const struct net_bridge *br,
446                               struct net_bridge_vlan_group *vg,
447                               struct sk_buff *skb, u16 *vid)
448 {
449         struct br_vlan_stats *stats;
450         struct net_bridge_vlan *v;
451         bool tagged;
452
453         BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
454         /* If vlan tx offload is disabled on the bridge device and the frame
455          * was sent from a vlan device on top of the bridge device, it does
456          * not have an HW accelerated vlan tag.
457          */
458         if (unlikely(!skb_vlan_tag_present(skb) &&
459                      skb->protocol == br->vlan_proto)) {
460                 skb = skb_vlan_untag(skb);
461                 if (unlikely(!skb))
462                         return false;
463         }
464
465         if (!br_vlan_get_tag(skb, vid)) {
466                 /* Tagged frame */
467                 if (skb->vlan_proto != br->vlan_proto) {
468                         /* Protocol-mismatch, empty out vlan_tci for new tag */
469                         skb_push(skb, ETH_HLEN);
470                         skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
471                                                         skb_vlan_tag_get(skb));
472                         if (unlikely(!skb))
473                                 return false;
474
475                         skb_pull(skb, ETH_HLEN);
476                         skb_reset_mac_len(skb);
477                         *vid = 0;
478                         tagged = false;
479                 } else {
480                         tagged = true;
481                 }
482         } else {
483                 /* Untagged frame */
484                 tagged = false;
485         }
486
487         if (!*vid) {
488                 u16 pvid = br_get_pvid(vg);
489
490                 /* Frame had a tag with VID 0 or did not have a tag.
491                  * See if pvid is set on this port.  That tells us which
492                  * vlan untagged or priority-tagged traffic belongs to.
493                  */
494                 if (!pvid)
495                         goto drop;
496
497                 /* PVID is set on this port.  Any untagged or priority-tagged
498                  * ingress frame is considered to belong to this vlan.
499                  */
500                 *vid = pvid;
501                 if (likely(!tagged))
502                         /* Untagged Frame. */
503                         __vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
504                 else
505                         /* Priority-tagged Frame.
506                          * At this point, we know that skb->vlan_tci VID
507                          * field was 0.
508                          * We update only VID field and preserve PCP field.
509                          */
510                         skb->vlan_tci |= pvid;
511
512                 /* if stats are disabled we can avoid the lookup */
513                 if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED))
514                         return true;
515         }
516         v = br_vlan_find(vg, *vid);
517         if (!v || !br_vlan_should_use(v))
518                 goto drop;
519
520         if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
521                 stats = this_cpu_ptr(v->stats);
522                 u64_stats_update_begin(&stats->syncp);
523                 stats->rx_bytes += skb->len;
524                 stats->rx_packets++;
525                 u64_stats_update_end(&stats->syncp);
526         }
527
528         return true;
529
530 drop:
531         kfree_skb(skb);
532         return false;
533 }
534
535 bool br_allowed_ingress(const struct net_bridge *br,
536                         struct net_bridge_vlan_group *vg, struct sk_buff *skb,
537                         u16 *vid)
538 {
539         /* If VLAN filtering is disabled on the bridge, all packets are
540          * permitted.
541          */
542         if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
543                 BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
544                 return true;
545         }
546
547         return __allowed_ingress(br, vg, skb, vid);
548 }
549
550 /* Called under RCU. */
551 bool br_allowed_egress(struct net_bridge_vlan_group *vg,
552                        const struct sk_buff *skb)
553 {
554         const struct net_bridge_vlan *v;
555         u16 vid;
556
557         /* If this packet was not filtered at input, let it pass */
558         if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
559                 return true;
560
561         br_vlan_get_tag(skb, &vid);
562         v = br_vlan_find(vg, vid);
563         if (v && br_vlan_should_use(v))
564                 return true;
565
566         return false;
567 }
568
569 /* Called under RCU */
570 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
571 {
572         struct net_bridge_vlan_group *vg;
573         struct net_bridge *br = p->br;
574
575         /* If filtering was disabled at input, let it pass. */
576         if (!br_opt_get(br, BROPT_VLAN_ENABLED))
577                 return true;
578
579         vg = nbp_vlan_group_rcu(p);
580         if (!vg || !vg->num_vlans)
581                 return false;
582
583         if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
584                 *vid = 0;
585
586         if (!*vid) {
587                 *vid = br_get_pvid(vg);
588                 if (!*vid)
589                         return false;
590
591                 return true;
592         }
593
594         if (br_vlan_find(vg, *vid))
595                 return true;
596
597         return false;
598 }
599
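/* Update an existing bridge vlan: promote a port-only context entry to a
 * real bridge entry when BRIDGE_VLAN_INFO_BRENTRY is requested and refresh
 * its flags.
 */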
600 static int br_vlan_add_existing(struct net_bridge *br,
601                                 struct net_bridge_vlan_group *vg,
602                                 struct net_bridge_vlan *vlan,
603                                 u16 flags, bool *changed,
604                                 struct netlink_ext_ack *extack)
605 {
606         int err;
607
608         err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
609         if (err && err != -EOPNOTSUPP)
610                 return err;
611
612         if (!br_vlan_is_brentry(vlan)) {
613                 /* Trying to change flags of non-existent bridge vlan */
614                 if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
615                         err = -EINVAL;
616                         goto err_flags;
617                 }
618                 /* It was only kept for port vlans, now make it real */
619                 err = br_fdb_insert(br, NULL, br->dev->dev_addr,
620                                     vlan->vid);
621                 if (err) {
622                         br_err(br, "failed to insert local address into bridge forwarding table\n");
623                         goto err_fdb_insert;
624                 }
625
626                 refcount_inc(&vlan->refcnt);
627                 vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
628                 vg->num_vlans++;
629                 *changed = true;
630         }
631
632         if (__vlan_add_flags(vlan, flags))
633                 *changed = true;
634
635         return 0;
636
637 err_fdb_insert:
638 err_flags:
639         br_switchdev_port_vlan_del(br->dev, vlan->vid);
640         return err;
641 }
642
643 /* Must be protected by RTNL.
644  * Must be called with vid in range from 1 to 4094 inclusive.
645  * changed must be true only if the vlan was created or updated
646  */
647 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
648                 struct netlink_ext_ack *extack)
649 {
650         struct net_bridge_vlan_group *vg;
651         struct net_bridge_vlan *vlan;
652         int ret;
653
654         ASSERT_RTNL();
655
656         *changed = false;
657         vg = br_vlan_group(br);
658         vlan = br_vlan_find(vg, vid);
659         if (vlan)
660                 return br_vlan_add_existing(br, vg, vlan, flags, changed,
661                                             extack);
662
663         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
664         if (!vlan)
665                 return -ENOMEM;
666
667         vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
668         if (!vlan->stats) {
669                 kfree(vlan);
670                 return -ENOMEM;
671         }
672         vlan->vid = vid;
673         vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
674         vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
675         vlan->br = br;
676         if (flags & BRIDGE_VLAN_INFO_BRENTRY)
677                 refcount_set(&vlan->refcnt, 1);
678         ret = __vlan_add(vlan, flags, extack);
679         if (ret) {
680                 free_percpu(vlan->stats);
681                 kfree(vlan);
682         } else {
683                 *changed = true;
684         }
685
686         return ret;
687 }
688
689 /* Must be protected by RTNL.
690  * Must be called with vid in range from 1 to 4094 inclusive.
691  */
692 int br_vlan_delete(struct net_bridge *br, u16 vid)
693 {
694         struct net_bridge_vlan_group *vg;
695         struct net_bridge_vlan *v;
696
697         ASSERT_RTNL();
698
699         vg = br_vlan_group(br);
700         v = br_vlan_find(vg, vid);
701         if (!v || !br_vlan_is_brentry(v))
702                 return -ENOENT;
703
704         br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
705         br_fdb_delete_by_port(br, NULL, vid, 0);
706
707         vlan_tunnel_info_del(vg, v);
708
709         return __vlan_del(v);
710 }
711
712 void br_vlan_flush(struct net_bridge *br)
713 {
714         struct net_bridge_vlan_group *vg;
715
716         ASSERT_RTNL();
717
718         vg = br_vlan_group(br);
719         __vlan_flush(vg);
720         RCU_INIT_POINTER(br->vlgrp, NULL);
721         synchronize_rcu();
722         __vlan_group_free(vg);
723 }
724
725 struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
726 {
727         if (!vg)
728                 return NULL;
729
730         return br_vlan_lookup(&vg->vlan_hash, vid);
731 }
732
733 /* Must be protected by RTNL. */
734 static void recalculate_group_addr(struct net_bridge *br)
735 {
736         if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
737                 return;
738
739         spin_lock_bh(&br->lock);
740         if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
741             br->vlan_proto == htons(ETH_P_8021Q)) {
742                 /* Bridge Group Address */
743                 br->group_addr[5] = 0x00;
744         } else { /* vlan_enabled && ETH_P_8021AD */
745                 /* Provider Bridge Group Address */
746                 br->group_addr[5] = 0x08;
747         }
748         spin_unlock_bh(&br->lock);
749 }
750
751 /* Must be protected by RTNL. */
752 void br_recalculate_fwd_mask(struct net_bridge *br)
753 {
754         if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
755             br->vlan_proto == htons(ETH_P_8021Q))
756                 br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
757         else /* vlan_enabled && ETH_P_8021AD */
758                 br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
759                                               ~(1u << br->group_addr[5]);
760 }
761
762 int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
763 {
764         struct switchdev_attr attr = {
765                 .orig_dev = br->dev,
766                 .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
767                 .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
768                 .u.vlan_filtering = val,
769         };
770         int err;
771
772         if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
773                 return 0;
774
775         err = switchdev_port_attr_set(br->dev, &attr);
776         if (err && err != -EOPNOTSUPP)
777                 return err;
778
779         br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
780         br_manage_promisc(br);
781         recalculate_group_addr(br);
782         br_recalculate_fwd_mask(br);
783
784         return 0;
785 }
786
787 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
788 {
789         return __br_vlan_filter_toggle(br, val);
790 }
791
792 bool br_vlan_enabled(const struct net_device *dev)
793 {
794         struct net_bridge *br = netdev_priv(dev);
795
796         return br_opt_get(br, BROPT_VLAN_ENABLED);
797 }
798 EXPORT_SYMBOL_GPL(br_vlan_enabled);
799
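/* Switch the bridge vlan protocol (802.1Q <-> 802.1ad): add the existing
 * port vlans with the new proto to the device filters first, then drop the
 * old proto entries; newly added filters are rolled back on failure.
 */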
800 int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
801 {
802         int err = 0;
803         struct net_bridge_port *p;
804         struct net_bridge_vlan *vlan;
805         struct net_bridge_vlan_group *vg;
806         __be16 oldproto;
807
808         if (br->vlan_proto == proto)
809                 return 0;
810
811         /* Add VLANs for the new proto to the device filter. */
812         list_for_each_entry(p, &br->port_list, list) {
813                 vg = nbp_vlan_group(p);
814                 list_for_each_entry(vlan, &vg->vlan_list, vlist) {
815                         err = vlan_vid_add(p->dev, proto, vlan->vid);
816                         if (err)
817                                 goto err_filt;
818                 }
819         }
820
821         oldproto = br->vlan_proto;
822         br->vlan_proto = proto;
823
824         recalculate_group_addr(br);
825         br_recalculate_fwd_mask(br);
826
827         /* Delete VLANs for the old proto from the device filter. */
828         list_for_each_entry(p, &br->port_list, list) {
829                 vg = nbp_vlan_group(p);
830                 list_for_each_entry(vlan, &vg->vlan_list, vlist)
831                         vlan_vid_del(p->dev, oldproto, vlan->vid);
832         }
833
834         return 0;
835
836 err_filt:
837         list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
838                 vlan_vid_del(p->dev, proto, vlan->vid);
839
840         list_for_each_entry_continue_reverse(p, &br->port_list, list) {
841                 vg = nbp_vlan_group(p);
842                 list_for_each_entry(vlan, &vg->vlan_list, vlist)
843                         vlan_vid_del(p->dev, proto, vlan->vid);
844         }
845
846         return err;
847 }
848
849 int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
850 {
851         if (val != ETH_P_8021Q && val != ETH_P_8021AD)
852                 return -EPROTONOSUPPORT;
853
854         return __br_vlan_set_proto(br, htons(val));
855 }
856
857 int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
858 {
859         switch (val) {
860         case 0:
861         case 1:
862                 br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
863                 break;
864         default:
865                 return -EINVAL;
866         }
867
868         return 0;
869 }
870
871 int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
872 {
873         struct net_bridge_port *p;
874
875         /* only allow changing the option if there are no port vlans configured */
876         list_for_each_entry(p, &br->port_list, list) {
877                 struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
878
879                 if (vg->num_vlans)
880                         return -EBUSY;
881         }
882
883         switch (val) {
884         case 0:
885         case 1:
886                 br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
887                 break;
888         default:
889                 return -EINVAL;
890         }
891
892         return 0;
893 }
894
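/* true if vid is the group's pvid and still carries the default untagged
 * configuration, i.e. it can be replaced without clobbering user setup
 */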
895 static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
896 {
897         struct net_bridge_vlan *v;
898
899         if (vid != vg->pvid)
900                 return false;
901
902         v = br_vlan_lookup(&vg->vlan_hash, vid);
903         if (v && br_vlan_should_use(v) &&
904             (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
905                 return true;
906
907         return false;
908 }
909
910 static void br_vlan_disable_default_pvid(struct net_bridge *br)
911 {
912         struct net_bridge_port *p;
913         u16 pvid = br->default_pvid;
914
915         /* Disable default_pvid on all ports where it is still
916          * configured.
917          */
918         if (vlan_default_pvid(br_vlan_group(br), pvid))
919                 br_vlan_delete(br, pvid);
920
921         list_for_each_entry(p, &br->port_list, list) {
922                 if (vlan_default_pvid(nbp_vlan_group(p), pvid))
923                         nbp_vlan_delete(p, pvid);
924         }
925
926         br->default_pvid = 0;
927 }
928
929 int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
930                                struct netlink_ext_ack *extack)
931 {
932         const struct net_bridge_vlan *pvent;
933         struct net_bridge_vlan_group *vg;
934         struct net_bridge_port *p;
935         unsigned long *changed;
936         bool vlchange;
937         u16 old_pvid;
938         int err = 0;
939
940         if (!pvid) {
941                 br_vlan_disable_default_pvid(br);
942                 return 0;
943         }
944
945         changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
946         if (!changed)
947                 return -ENOMEM;
948
949         old_pvid = br->default_pvid;
950
951         /* Update default_pvid config only if we do not conflict with
952          * user configuration.
953          */
954         vg = br_vlan_group(br);
955         pvent = br_vlan_find(vg, pvid);
956         if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
957             (!pvent || !br_vlan_should_use(pvent))) {
958                 err = br_vlan_add(br, pvid,
959                                   BRIDGE_VLAN_INFO_PVID |
960                                   BRIDGE_VLAN_INFO_UNTAGGED |
961                                   BRIDGE_VLAN_INFO_BRENTRY,
962                                   &vlchange, extack);
963                 if (err)
964                         goto out;
965                 br_vlan_delete(br, old_pvid);
966                 set_bit(0, changed);
967         }
968
969         list_for_each_entry(p, &br->port_list, list) {
970                 /* Update default_pvid config only if we do not conflict with
971                  * user configuration.
972                  */
973                 vg = nbp_vlan_group(p);
974                 if ((old_pvid &&
975                      !vlan_default_pvid(vg, old_pvid)) ||
976                     br_vlan_find(vg, pvid))
977                         continue;
978
979                 err = nbp_vlan_add(p, pvid,
980                                    BRIDGE_VLAN_INFO_PVID |
981                                    BRIDGE_VLAN_INFO_UNTAGGED,
982                                    &vlchange, extack);
983                 if (err)
984                         goto err_port;
985                 nbp_vlan_delete(p, old_pvid);
986                 set_bit(p->port_no, changed);
987         }
988
989         br->default_pvid = pvid;
990
991 out:
992         bitmap_free(changed);
993         return err;
994
995 err_port:
996         list_for_each_entry_continue_reverse(p, &br->port_list, list) {
997                 if (!test_bit(p->port_no, changed))
998                         continue;
999
1000                 if (old_pvid)
1001                         nbp_vlan_add(p, old_pvid,
1002                                      BRIDGE_VLAN_INFO_PVID |
1003                                      BRIDGE_VLAN_INFO_UNTAGGED,
1004                                      &vlchange, NULL);
1005                 nbp_vlan_delete(p, pvid);
1006         }
1007
1008         if (test_bit(0, changed)) {
1009                 if (old_pvid)
1010                         br_vlan_add(br, old_pvid,
1011                                     BRIDGE_VLAN_INFO_PVID |
1012                                     BRIDGE_VLAN_INFO_UNTAGGED |
1013                                     BRIDGE_VLAN_INFO_BRENTRY,
1014                                     &vlchange, NULL);
1015                 br_vlan_delete(br, pvid);
1016         }
1017         goto out;
1018 }
1019
1020 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
1021 {
1022         u16 pvid = val;
1023         int err = 0;
1024
1025         if (val >= VLAN_VID_MASK)
1026                 return -EINVAL;
1027
1028         if (pvid == br->default_pvid)
1029                 goto out;
1030
1031         /* Only allow default pvid change when filtering is disabled */
1032         if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
1033                 pr_info_once("Please disable vlan filtering to change default_pvid\n");
1034                 err = -EPERM;
1035                 goto out;
1036         }
1037         err = __br_vlan_set_default_pvid(br, pvid, NULL);
1038 out:
1039         return err;
1040 }
1041
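/* Bridge device init: allocate the vlan group, default to 802.1Q and add
 * vlan 1 as the default pvid (untagged bridge entry).
 */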
1042 int br_vlan_init(struct net_bridge *br)
1043 {
1044         struct net_bridge_vlan_group *vg;
1045         int ret = -ENOMEM;
1046         bool changed;
1047
1048         vg = kzalloc(sizeof(*vg), GFP_KERNEL);
1049         if (!vg)
1050                 goto out;
1051         ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1052         if (ret)
1053                 goto err_rhtbl;
1054         ret = vlan_tunnel_init(vg);
1055         if (ret)
1056                 goto err_tunnel_init;
1057         INIT_LIST_HEAD(&vg->vlan_list);
1058         br->vlan_proto = htons(ETH_P_8021Q);
1059         br->default_pvid = 1;
1060         rcu_assign_pointer(br->vlgrp, vg);
1061         ret = br_vlan_add(br, 1,
1062                           BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
1063                           BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1064         if (ret)
1065                 goto err_vlan_add;
1066
1067 out:
1068         return ret;
1069
1070 err_vlan_add:
1071         vlan_tunnel_deinit(vg);
1072 err_tunnel_init:
1073         rhashtable_destroy(&vg->vlan_hash);
1074 err_rhtbl:
1075         kfree(vg);
1076
1077         goto out;
1078 }
1079
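/* Port init: push the bridge's vlan filtering state down via switchdev,
 * allocate the port's vlan group and add the bridge's default_pvid.
 */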
1080 int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
1081 {
1082         struct switchdev_attr attr = {
1083                 .orig_dev = p->br->dev,
1084                 .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
1085                 .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
1086                 .u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
1087         };
1088         struct net_bridge_vlan_group *vg;
1089         int ret = -ENOMEM;
1090
1091         vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
1092         if (!vg)
1093                 goto out;
1094
1095         ret = switchdev_port_attr_set(p->dev, &attr);
1096         if (ret && ret != -EOPNOTSUPP)
1097                 goto err_vlan_enabled;
1098
1099         ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1100         if (ret)
1101                 goto err_rhtbl;
1102         ret = vlan_tunnel_init(vg);
1103         if (ret)
1104                 goto err_tunnel_init;
1105         INIT_LIST_HEAD(&vg->vlan_list);
1106         rcu_assign_pointer(p->vlgrp, vg);
1107         if (p->br->default_pvid) {
1108                 bool changed;
1109
1110                 ret = nbp_vlan_add(p, p->br->default_pvid,
1111                                    BRIDGE_VLAN_INFO_PVID |
1112                                    BRIDGE_VLAN_INFO_UNTAGGED,
1113                                    &changed, extack);
1114                 if (ret)
1115                         goto err_vlan_add;
1116         }
1117 out:
1118         return ret;
1119
1120 err_vlan_add:
1121         RCU_INIT_POINTER(p->vlgrp, NULL);
1122         synchronize_rcu();
1123         vlan_tunnel_deinit(vg);
1124 err_tunnel_init:
1125         rhashtable_destroy(&vg->vlan_hash);
1126 err_rhtbl:
1127 err_vlan_enabled:
1128         kfree(vg);
1129
1130         goto out;
1131 }
1132
1133 /* Must be protected by RTNL.
1134  * Must be called with vid in range from 1 to 4094 inclusive.
1135  * changed must be true only if the vlan was created or updated
1136  */
1137 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
1138                  bool *changed, struct netlink_ext_ack *extack)
1139 {
1140         struct net_bridge_vlan *vlan;
1141         int ret;
1142
1143         ASSERT_RTNL();
1144
1145         *changed = false;
1146         vlan = br_vlan_find(nbp_vlan_group(port), vid);
1147         if (vlan) {
1148                 /* Pass the flags to the hardware bridge */
1149                 ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
1150                 if (ret && ret != -EOPNOTSUPP)
1151                         return ret;
1152                 *changed = __vlan_add_flags(vlan, flags);
1153
1154                 return 0;
1155         }
1156
1157         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1158         if (!vlan)
1159                 return -ENOMEM;
1160
1161         vlan->vid = vid;
1162         vlan->port = port;
1163         ret = __vlan_add(vlan, flags, extack);
1164         if (ret)
1165                 kfree(vlan);
1166         else
1167                 *changed = true;
1168
1169         return ret;
1170 }
1171
1172 /* Must be protected by RTNL.
1173  * Must be called with vid in range from 1 to 4094 inclusive.
1174  */
1175 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
1176 {
1177         struct net_bridge_vlan *v;
1178
1179         ASSERT_RTNL();
1180
1181         v = br_vlan_find(nbp_vlan_group(port), vid);
1182         if (!v)
1183                 return -ENOENT;
1184         br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1185         br_fdb_delete_by_port(port->br, port, vid, 0);
1186
1187         return __vlan_del(v);
1188 }
1189
1190 void nbp_vlan_flush(struct net_bridge_port *port)
1191 {
1192         struct net_bridge_vlan_group *vg;
1193
1194         ASSERT_RTNL();
1195
1196         vg = nbp_vlan_group(port);
1197         __vlan_flush(vg);
1198         RCU_INIT_POINTER(port->vlgrp, NULL);
1199         synchronize_rcu();
1200         __vlan_group_free(vg);
1201 }
1202
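/* Sum the per-cpu counters of a vlan into *stats */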
1203 void br_vlan_get_stats(const struct net_bridge_vlan *v,
1204                        struct br_vlan_stats *stats)
1205 {
1206         int i;
1207
1208         memset(stats, 0, sizeof(*stats));
1209         for_each_possible_cpu(i) {
1210                 u64 rxpackets, rxbytes, txpackets, txbytes;
1211                 struct br_vlan_stats *cpu_stats;
1212                 unsigned int start;
1213
1214                 cpu_stats = per_cpu_ptr(v->stats, i);
1215                 do {
1216                         start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1217                         rxpackets = cpu_stats->rx_packets;
1218                         rxbytes = cpu_stats->rx_bytes;
1219                         txbytes = cpu_stats->tx_bytes;
1220                         txpackets = cpu_stats->tx_packets;
1221                 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1222
1223                 stats->rx_packets += rxpackets;
1224                 stats->rx_bytes += rxbytes;
1225                 stats->tx_bytes += txbytes;
1226                 stats->tx_packets += txpackets;
1227         }
1228 }
1229
1230 int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
1231 {
1232         struct net_bridge_vlan_group *vg;
1233         struct net_bridge_port *p;
1234
1235         ASSERT_RTNL();
1236         p = br_port_get_check_rtnl(dev);
1237         if (p)
1238                 vg = nbp_vlan_group(p);
1239         else if (netif_is_bridge_master(dev))
1240                 vg = br_vlan_group(netdev_priv(dev));
1241         else
1242                 return -EINVAL;
1243
1244         *p_pvid = br_get_pvid(vg);
1245         return 0;
1246 }
1247 EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
1248
1249 int br_vlan_get_info(const struct net_device *dev, u16 vid,
1250                      struct bridge_vlan_info *p_vinfo)
1251 {
1252         struct net_bridge_vlan_group *vg;
1253         struct net_bridge_vlan *v;
1254         struct net_bridge_port *p;
1255
1256         ASSERT_RTNL();
1257         p = br_port_get_check_rtnl(dev);
1258         if (p)
1259                 vg = nbp_vlan_group(p);
1260         else if (netif_is_bridge_master(dev))
1261                 vg = br_vlan_group(netdev_priv(dev));
1262         else
1263                 return -EINVAL;
1264
1265         v = br_vlan_find(vg, vid);
1266         if (!v)
1267                 return -ENOENT;
1268
1269         p_vinfo->vid = vid;
1270         p_vinfo->flags = v->flags;
1271         return 0;
1272 }
1273 EXPORT_SYMBOL_GPL(br_vlan_get_info);
1274
1275 static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
1276 {
1277         return is_vlan_dev(dev) &&
1278                 !!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
1279 }
1280
1281 static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
1282                                        __always_unused void *data)
1283 {
1284         return br_vlan_is_bind_vlan_dev(dev);
1285 }
1286
1287 static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
1288 {
1289         int found;
1290
1291         rcu_read_lock();
1292         found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
1293                                               NULL);
1294         rcu_read_unlock();
1295
1296         return !!found;
1297 }
1298
1299 struct br_vlan_bind_walk_data {
1300         u16 vid;
1301         struct net_device *result;
1302 };
1303
1304 static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
1305                                           void *data_in)
1306 {
1307         struct br_vlan_bind_walk_data *data = data_in;
1308         int found = 0;
1309
1310         if (br_vlan_is_bind_vlan_dev(dev) &&
1311             vlan_dev_priv(dev)->vlan_id == data->vid) {
1312                 data->result = dev;
1313                 found = 1;
1314         }
1315
1316         return found;
1317 }
1318
1319 static struct net_device *
1320 br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
1321 {
1322         struct br_vlan_bind_walk_data data = {
1323                 .vid = vid,
1324         };
1325
1326         rcu_read_lock();
1327         netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
1328                                       &data);
1329         rcu_read_unlock();
1330
1331         return data.result;
1332 }
1333
1334 static bool br_vlan_is_dev_up(const struct net_device *dev)
1335 {
1336         return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
1337 }
1338
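/* Sync the carrier of an upper bridge-binding vlan device: carrier is on
 * when the bridge has carrier and at least one port with this vlan
 * configured is up.
 */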
1339 static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
1340                                        struct net_device *vlan_dev)
1341 {
1342         u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
1343         struct net_bridge_vlan_group *vg;
1344         struct net_bridge_port *p;
1345         bool has_carrier = false;
1346
1347         if (!netif_carrier_ok(br->dev)) {
1348                 netif_carrier_off(vlan_dev);
1349                 return;
1350         }
1351
1352         list_for_each_entry(p, &br->port_list, list) {
1353                 vg = nbp_vlan_group(p);
1354                 if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
1355                         has_carrier = true;
1356                         break;
1357                 }
1358         }
1359
1360         if (has_carrier)
1361                 netif_carrier_on(vlan_dev);
1362         else
1363                 netif_carrier_off(vlan_dev);
1364 }
1365
1366 static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
1367 {
1368         struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
1369         struct net_bridge_vlan *vlan;
1370         struct net_device *vlan_dev;
1371
1372         list_for_each_entry(vlan, &vg->vlan_list, vlist) {
1373                 vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
1374                                                            vlan->vid);
1375                 if (vlan_dev) {
1376                         if (br_vlan_is_dev_up(p->dev)) {
1377                                 if (netif_carrier_ok(p->br->dev))
1378                                         netif_carrier_on(vlan_dev);
1379                         } else {
1380                                 br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1381                         }
1382                 }
1383         }
1384 }
1385
1386 static void br_vlan_upper_change(struct net_device *dev,
1387                                  struct net_device *upper_dev,
1388                                  bool linking)
1389 {
1390         struct net_bridge *br = netdev_priv(dev);
1391
1392         if (!br_vlan_is_bind_vlan_dev(upper_dev))
1393                 return;
1394
1395         if (linking) {
1396                 br_vlan_set_vlan_dev_state(br, upper_dev);
1397                 br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
1398         } else {
1399                 br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
1400                               br_vlan_has_upper_bind_vlan_dev(dev));
1401         }
1402 }
1403
1404 struct br_vlan_link_state_walk_data {
1405         struct net_bridge *br;
1406 };
1407
1408 static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
1409                                         void *data_in)
1410 {
1411         struct br_vlan_link_state_walk_data *data = data_in;
1412
1413         if (br_vlan_is_bind_vlan_dev(vlan_dev))
1414                 br_vlan_set_vlan_dev_state(data->br, vlan_dev);
1415
1416         return 0;
1417 }
1418
1419 static void br_vlan_link_state_change(struct net_device *dev,
1420                                       struct net_bridge *br)
1421 {
1422         struct br_vlan_link_state_walk_data data = {
1423                 .br = br
1424         };
1425
1426         rcu_read_lock();
1427         netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
1428                                       &data);
1429         rcu_read_unlock();
1430 }
1431
1432 /* Must be protected by RTNL. */
1433 static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
1434 {
1435         struct net_device *vlan_dev;
1436
1437         if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1438                 return;
1439
1440         vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
1441         if (vlan_dev)
1442                 br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1443 }
1444
1445 /* Must be protected by RTNL. */
1446 void br_vlan_bridge_event(struct net_device *dev, unsigned long event,
1447                           void *ptr)
1448 {
1449         struct netdev_notifier_changeupper_info *info;
1450         struct net_bridge *br;
1451
1452         switch (event) {
1453         case NETDEV_CHANGEUPPER:
1454                 info = ptr;
1455                 br_vlan_upper_change(dev, info->upper_dev, info->linking);
1456                 break;
1457
1458         case NETDEV_CHANGE:
1459         case NETDEV_UP:
1460                 br = netdev_priv(dev);
1461                 if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
1462                         return;
1463                 br_vlan_link_state_change(dev, br);
1464                 break;
1465         }
1466 }
1467
1468 /* Must be protected by RTNL. */
1469 void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
1470 {
1471         if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1472                 return;
1473
1474         switch (event) {
1475         case NETDEV_CHANGE:
1476         case NETDEV_DOWN:
1477         case NETDEV_UP:
1478                 br_vlan_set_all_vlan_dev_state(p);
1479                 break;
1480         }
1481 }