net: dsa: avoid suspicious RCU usage for synced VLAN-aware MAC addresses
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/selftests.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/if_hsr.h>
#include <net/dcbnl.h>
#include <linux/netpoll.h>

#include "dsa.h"
#include "port.h"
#include "master.h"
#include "netlink.h"
#include "slave.h"
#include "switch.h"
#include "tag.h"

struct dsa_switchdev_event_work {
        struct net_device *dev;
        struct net_device *orig_dev;
        struct work_struct work;
        unsigned long event;
        /* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
         * SWITCHDEV_FDB_DEL_TO_DEVICE
         */
        unsigned char addr[ETH_ALEN];
        u16 vid;
        bool host_addr;
};

enum dsa_standalone_event {
        DSA_UC_ADD,
        DSA_UC_DEL,
        DSA_MC_ADD,
        DSA_MC_DEL,
};

struct dsa_standalone_event_work {
        struct work_struct work;
        struct net_device *dev;
        enum dsa_standalone_event event;
        unsigned char addr[ETH_ALEN];
        u16 vid;
};

struct dsa_host_vlan_rx_filtering_ctx {
        struct net_device *dev;
        const unsigned char *addr;
        enum dsa_standalone_event event;
};

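/* Standalone host unicast filtering can only be offloaded if the switch
 * implements the FDB ops, isolates FDB entries between standalone and
 * bridged ports, and keeps VLAN filtering a per-port (not global) setting.
 */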
static bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
{
        return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
               ds->fdb_isolation && !ds->vlan_filtering_is_global &&
               !ds->needs_standalone_vlan_filtering;
}

static bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds)
{
        return ds->ops->port_mdb_add && ds->ops->port_mdb_del &&
               ds->fdb_isolation && !ds->vlan_filtering_is_global &&
               !ds->needs_standalone_vlan_filtering;
}

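/* Deferred work which programs standalone host addresses into the switch.
 * It runs on the DSA workqueue, in process context, because the driver
 * operations behind dsa_port_standalone_host_{fdb,mdb}_{add,del} may sleep.
 */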
static void dsa_slave_standalone_event_work(struct work_struct *work)
{
        struct dsa_standalone_event_work *standalone_work =
                container_of(work, struct dsa_standalone_event_work, work);
        const unsigned char *addr = standalone_work->addr;
        struct net_device *dev = standalone_work->dev;
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct switchdev_obj_port_mdb mdb;
        struct dsa_switch *ds = dp->ds;
        u16 vid = standalone_work->vid;
        int err;

        switch (standalone_work->event) {
        case DSA_UC_ADD:
                err = dsa_port_standalone_host_fdb_add(dp, addr, vid);
                if (err) {
                        dev_err(ds->dev,
                                "port %d failed to add %pM vid %d to fdb: %d\n",
                                dp->index, addr, vid, err);
                        break;
                }
                break;

        case DSA_UC_DEL:
                err = dsa_port_standalone_host_fdb_del(dp, addr, vid);
                if (err) {
                        dev_err(ds->dev,
                                "port %d failed to delete %pM vid %d from fdb: %d\n",
                                dp->index, addr, vid, err);
                }

                break;
        case DSA_MC_ADD:
                ether_addr_copy(mdb.addr, addr);
                mdb.vid = vid;

                err = dsa_port_standalone_host_mdb_add(dp, &mdb);
                if (err) {
                        dev_err(ds->dev,
                                "port %d failed to add %pM vid %d to mdb: %d\n",
                                dp->index, addr, vid, err);
                        break;
                }
                break;
        case DSA_MC_DEL:
                ether_addr_copy(mdb.addr, addr);
                mdb.vid = vid;

                err = dsa_port_standalone_host_mdb_del(dp, &mdb);
                if (err) {
                        dev_err(ds->dev,
                                "port %d failed to delete %pM vid %d from mdb: %d\n",
                                dp->index, addr, vid, err);
                }

                break;
        }

        kfree(standalone_work);
}

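/* Called from atomic context (the address list spinlock is held), hence the
 * GFP_ATOMIC allocation and the deferral of the actual hardware programming
 * to dsa_slave_standalone_event_work().
 */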
static int dsa_slave_schedule_standalone_work(struct net_device *dev,
                                              enum dsa_standalone_event event,
                                              const unsigned char *addr,
                                              u16 vid)
{
        struct dsa_standalone_event_work *standalone_work;

        standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC);
        if (!standalone_work)
                return -ENOMEM;

        INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
        standalone_work->event = event;
        standalone_work->dev = dev;

        ether_addr_copy(standalone_work->addr, addr);
        standalone_work->vid = vid;

        dsa_schedule_work(&standalone_work->work);

        return 0;
}

static int dsa_slave_host_vlan_rx_filtering(void *arg, int vid)
{
        struct dsa_host_vlan_rx_filtering_ctx *ctx = arg;

        return dsa_slave_schedule_standalone_work(ctx->dev, ctx->event,
                                                  ctx->addr, vid);
}

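/* Run the callback for VID 0 (used by standalone ports) plus every VLAN
 * uploaded by the user. The dp->user_vlans list is modified with
 * dev->addr_list_lock held, so holding the same lock here (rather than an
 * RCU read-side critical section) is what makes the traversal safe.
 */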
static int dsa_slave_vlan_for_each(struct net_device *dev,
                                   int (*cb)(void *arg, int vid), void *arg)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_vlan *v;
        int err;

        lockdep_assert_held(&dev->addr_list_lock);

        err = cb(arg, 0);
        if (err)
                return err;

        list_for_each_entry(v, &dp->user_vlans, list) {
                err = cb(arg, v->vid);
                if (err)
                        return err;
        }

        return 0;
}

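/* Sync a secondary unicast address to the DSA master and, when the switch
 * supports standalone FDB entries, also install it as a host FDB entry in
 * every VLAN of interest, so it is still received with unicast filtering on.
 */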
static int dsa_slave_sync_uc(struct net_device *dev,
                             const unsigned char *addr)
{
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_host_vlan_rx_filtering_ctx ctx = {
                .dev = dev,
                .addr = addr,
                .event = DSA_UC_ADD,
        };

        dev_uc_add(master, addr);

        if (!dsa_switch_supports_uc_filtering(dp->ds))
                return 0;

        return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
                                       &ctx);
}

static int dsa_slave_unsync_uc(struct net_device *dev,
                               const unsigned char *addr)
{
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_host_vlan_rx_filtering_ctx ctx = {
                .dev = dev,
                .addr = addr,
                .event = DSA_UC_DEL,
        };

        dev_uc_del(master, addr);

        if (!dsa_switch_supports_uc_filtering(dp->ds))
                return 0;

        return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
                                       &ctx);
}

static int dsa_slave_sync_mc(struct net_device *dev,
                             const unsigned char *addr)
{
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_host_vlan_rx_filtering_ctx ctx = {
                .dev = dev,
                .addr = addr,
                .event = DSA_MC_ADD,
        };

        dev_mc_add(master, addr);

        if (!dsa_switch_supports_mc_filtering(dp->ds))
                return 0;

        return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
                                       &ctx);
}

static int dsa_slave_unsync_mc(struct net_device *dev,
                               const unsigned char *addr)
{
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_host_vlan_rx_filtering_ctx ctx = {
                .dev = dev,
                .addr = addr,
                .event = DSA_MC_DEL,
        };

        dev_mc_del(master, addr);

        if (!dsa_switch_supports_mc_filtering(dp->ds))
                return 0;

        return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
                                       &ctx);
}

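/* Replay the addresses which were already synced to this user port, e.g.
 * when the DSA master is changed and the addresses must be transferred to
 * the new master interface.
 */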
void dsa_slave_sync_ha(struct net_device *dev)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;
        struct netdev_hw_addr *ha;

        netif_addr_lock_bh(dev);

        netdev_for_each_synced_mc_addr(ha, dev)
                dsa_slave_sync_mc(dev, ha->addr);

        netdev_for_each_synced_uc_addr(ha, dev)
                dsa_slave_sync_uc(dev, ha->addr);

        netif_addr_unlock_bh(dev);

        if (dsa_switch_supports_uc_filtering(ds) ||
            dsa_switch_supports_mc_filtering(ds))
                dsa_flush_workqueue();
}

void dsa_slave_unsync_ha(struct net_device *dev)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;
        struct netdev_hw_addr *ha;

        netif_addr_lock_bh(dev);

        netdev_for_each_synced_uc_addr(ha, dev)
                dsa_slave_unsync_uc(dev, ha->addr);

        netdev_for_each_synced_mc_addr(ha, dev)
                dsa_slave_unsync_mc(dev, ha->addr);

        netif_addr_unlock_bh(dev);

        if (dsa_switch_supports_uc_filtering(ds) ||
            dsa_switch_supports_mc_filtering(ds))
                dsa_flush_workqueue();
}

/* slave mii_bus handling ***************************************************/
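/* Addresses outside ds->phys_mii_mask read as 0xffff, which the PHY library
 * interprets as "no device present" when scanning the bus.
 */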
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
        struct dsa_switch *ds = bus->priv;

        if (ds->phys_mii_mask & (1 << addr))
                return ds->ops->phy_read(ds, addr, reg);

        return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
        struct dsa_switch *ds = bus->priv;

        if (ds->phys_mii_mask & (1 << addr))
                return ds->ops->phy_write(ds, addr, reg, val);

        return 0;
}

void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
        ds->slave_mii_bus->priv = (void *)ds;
        ds->slave_mii_bus->name = "dsa slave smi";
        ds->slave_mii_bus->read = dsa_slave_phy_read;
        ds->slave_mii_bus->write = dsa_slave_phy_write;
        snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
                 ds->dst->index, ds->index);
        ds->slave_mii_bus->parent = ds->dev;
        ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}

/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
        return dsa_slave_to_master(dev)->ifindex;
}

static int dsa_slave_open(struct net_device *dev)
{
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;
        int err;

        err = dev_open(master, NULL);
        if (err < 0) {
                netdev_err(dev, "failed to open master %s\n", master->name);
                goto out;
        }

        if (dsa_switch_supports_uc_filtering(ds)) {
                err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
                if (err)
                        goto out;
        }

        if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
                err = dev_uc_add(master, dev->dev_addr);
                if (err < 0)
                        goto del_host_addr;
        }

        err = dsa_port_enable_rt(dp, dev->phydev);
        if (err)
                goto del_unicast;

        return 0;

del_unicast:
        if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
                dev_uc_del(master, dev->dev_addr);
del_host_addr:
        if (dsa_switch_supports_uc_filtering(ds))
                dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
out:
        return err;
}

static int dsa_slave_close(struct net_device *dev)
{
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        dsa_port_disable_rt(dp);

        if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
                dev_uc_del(master, dev->dev_addr);

        if (dsa_switch_supports_uc_filtering(ds))
                dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

        return 0;
}

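/* Translate the IFF_PROMISC and IFF_ALLMULTI flags into CPU port flooding
 * settings: promiscuous mode needs all unknown unicast and multicast
 * flooded towards the host, allmulti just the multicast part.
 */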
static void dsa_slave_manage_host_flood(struct net_device *dev)
{
        bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
        struct dsa_port *dp = dsa_slave_to_port(dev);
        bool uc = dev->flags & IFF_PROMISC;

        dsa_port_set_host_flood(dp, uc, mc);
}

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (change & IFF_ALLMULTI)
                dev_set_allmulti(master,
                                 dev->flags & IFF_ALLMULTI ? 1 : -1);
        if (change & IFF_PROMISC)
                dev_set_promiscuity(master,
                                    dev->flags & IFF_PROMISC ? 1 : -1);

        if (dsa_switch_supports_uc_filtering(ds) &&
            dsa_switch_supports_mc_filtering(ds))
                dsa_slave_manage_host_flood(dev);
}

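/* ndo_set_rx_mode runs under dev->addr_list_lock, which is also why the
 * sync/unsync callbacks below must defer hardware access to a workqueue.
 */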
static void dsa_slave_set_rx_mode(struct net_device *dev)
{
        __dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
        __dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
}

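/* Note the ordering: the new address is installed in hardware and on the
 * master before the old one is removed, so the port never transiently stops
 * receiving traffic addressed to it.
 */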
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;
        struct sockaddr *addr = a;
        int err;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* If the port is down, the address isn't synced yet to hardware or
         * to the DSA master, so there is nothing to change.
         */
        if (!(dev->flags & IFF_UP))
                goto out_change_dev_addr;

        if (dsa_switch_supports_uc_filtering(ds)) {
                err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
                if (err)
                        return err;
        }

        if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
                err = dev_uc_add(master, addr->sa_data);
                if (err < 0)
                        goto del_unicast;
        }

        if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
                dev_uc_del(master, dev->dev_addr);

        if (dsa_switch_supports_uc_filtering(ds))
                dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);

out_change_dev_addr:
        eth_hw_addr_set(dev, addr->sa_data);

        return 0;

del_unicast:
        if (dsa_switch_supports_uc_filtering(ds))
                dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);

        return err;
}

struct dsa_slave_dump_ctx {
        struct net_device *dev;
        struct sk_buff *skb;
        struct netlink_callback *cb;
        int idx;
};

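/* Encode one FDB entry as an RTM_NEWNEIGH netlink message for an ongoing
 * 'bridge fdb show' dump, resuming from the offset saved in cb->args[2].
 */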
static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
                           bool is_static, void *data)
{
        struct dsa_slave_dump_ctx *dump = data;
        u32 portid = NETLINK_CB(dump->cb->skb).portid;
        u32 seq = dump->cb->nlh->nlmsg_seq;
        struct nlmsghdr *nlh;
        struct ndmsg *ndm;

        if (dump->idx < dump->cb->args[2])
                goto skip;

        nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
                        sizeof(*ndm), NLM_F_MULTI);
        if (!nlh)
                return -EMSGSIZE;

        ndm = nlmsg_data(nlh);
        ndm->ndm_family  = AF_BRIDGE;
        ndm->ndm_pad1    = 0;
        ndm->ndm_pad2    = 0;
        ndm->ndm_flags   = NTF_SELF;
        ndm->ndm_type    = 0;
        ndm->ndm_ifindex = dump->dev->ifindex;
        ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

        if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
                goto nla_put_failure;

        if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
                goto nla_put_failure;

        nlmsg_end(dump->skb, nlh);

skip:
        dump->idx++;
        return 0;

nla_put_failure:
        nlmsg_cancel(dump->skb, nlh);
        return -EMSGSIZE;
}

static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                   struct net_device *dev, struct net_device *filter_dev,
                   int *idx)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_slave_dump_ctx dump = {
                .dev = dev,
                .skb = skb,
                .cb = cb,
                .idx = *idx,
        };
        int err;

        err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
        *idx = dump.idx;

        return err;
}

static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct dsa_switch *ds = p->dp->ds;
        int port = p->dp->index;

        /* Pass through to switch driver if it supports timestamping */
        switch (cmd) {
        case SIOCGHWTSTAMP:
                if (ds->ops->port_hwtstamp_get)
                        return ds->ops->port_hwtstamp_get(ds, port, ifr);
                break;
        case SIOCSHWTSTAMP:
                if (ds->ops->port_hwtstamp_set)
                        return ds->ops->port_hwtstamp_set(ds, port, ifr);
                break;
        }

        return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}

static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
                                   const struct switchdev_attr *attr,
                                   struct netlink_ext_ack *extack)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        int ret;

        if (ctx && ctx != dp)
                return 0;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
                if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
                        return -EOPNOTSUPP;

                ret = dsa_port_set_state(dp, attr->u.stp_state, true);
                break;
        case SWITCHDEV_ATTR_ID_PORT_MST_STATE:
                if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
                        return -EOPNOTSUPP;

                ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
                if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
                        return -EOPNOTSUPP;

                ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
                                              extack);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
                if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
                        return -EOPNOTSUPP;

                ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_MST:
                if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
                        return -EOPNOTSUPP;

                ret = dsa_port_mst_enable(dp, attr->u.mst, extack);
                break;
        case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
                if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
                        return -EOPNOTSUPP;

                ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
                                                extack);
                break;
        case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
                if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev))
                        return -EOPNOTSUPP;

                ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
                break;
        case SWITCHDEV_ATTR_ID_VLAN_MSTI:
                if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
                        return -EOPNOTSUPP;

                ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti);
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }

        return ret;
}

/* Must be called under rcu_read_lock() */
static int
dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
                                      const struct switchdev_obj_port_vlan *vlan)
{
        struct net_device *upper_dev;
        struct list_head *iter;

        netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
                u16 vid;

                if (!is_vlan_dev(upper_dev))
                        continue;

                vid = vlan_dev_vlan_id(upper_dev);
                if (vid == vlan->vid)
                        return -EBUSY;
        }

        return 0;
}

static int dsa_slave_vlan_add(struct net_device *dev,
                              const struct switchdev_obj *obj,
                              struct netlink_ext_ack *extack)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct switchdev_obj_port_vlan *vlan;
        int err;

        if (dsa_port_skip_vlan_configuration(dp)) {
                NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
                return 0;
        }

        vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

        /* Deny adding a bridge VLAN when there is already an 802.1Q upper with
         * the same VID.
         */
        if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
                rcu_read_lock();
                err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
                rcu_read_unlock();
                if (err) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Port already has a VLAN upper with this VID");
                        return err;
                }
        }

        return dsa_port_vlan_add(dp, vlan, extack);
}

/* Offload a VLAN installed on the bridge or on a foreign interface by
 * installing it as a VLAN towards the CPU port.
 */
static int dsa_slave_host_vlan_add(struct net_device *dev,
                                   const struct switchdev_obj *obj,
                                   struct netlink_ext_ack *extack)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct switchdev_obj_port_vlan vlan;

        /* Do nothing if this is a software bridge */
        if (!dp->bridge)
                return -EOPNOTSUPP;

        if (dsa_port_skip_vlan_configuration(dp)) {
                NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
                return 0;
        }

        vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

        /* Even though drivers often handle CPU membership in special ways,
         * it doesn't make sense to program a PVID, so clear this flag.
         */
        vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

        return dsa_port_host_vlan_add(dp, &vlan, extack);
}

static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
                                  const struct switchdev_obj *obj,
                                  struct netlink_ext_ack *extack)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        int err;

        if (ctx && ctx != dp)
                return 0;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
                        return -EOPNOTSUPP;

                err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
                break;
        case SWITCHDEV_OBJ_ID_HOST_MDB:
                if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
                        return -EOPNOTSUPP;

                err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
                break;
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
                        err = dsa_slave_vlan_add(dev, obj, extack);
                else
                        err = dsa_slave_host_vlan_add(dev, obj, extack);
                break;
        case SWITCHDEV_OBJ_ID_MRP:
                if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
                        return -EOPNOTSUPP;

                err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
                break;
        case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
                if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
                        return -EOPNOTSUPP;

                err = dsa_port_mrp_add_ring_role(dp,
                                                 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

static int dsa_slave_vlan_del(struct net_device *dev,
                              const struct switchdev_obj *obj)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct switchdev_obj_port_vlan *vlan;

        if (dsa_port_skip_vlan_configuration(dp))
                return 0;

        vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

        return dsa_port_vlan_del(dp, vlan);
}

static int dsa_slave_host_vlan_del(struct net_device *dev,
                                   const struct switchdev_obj *obj)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct switchdev_obj_port_vlan *vlan;

        /* Do nothing if this is a software bridge */
        if (!dp->bridge)
                return -EOPNOTSUPP;

        if (dsa_port_skip_vlan_configuration(dp))
                return 0;

        vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

        return dsa_port_host_vlan_del(dp, vlan);
}

static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
                                  const struct switchdev_obj *obj)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        int err;

        if (ctx && ctx != dp)
                return 0;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
                        return -EOPNOTSUPP;

                err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
                break;
        case SWITCHDEV_OBJ_ID_HOST_MDB:
                if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
                        return -EOPNOTSUPP;

                err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
                break;
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
                        err = dsa_slave_vlan_del(dev, obj);
                else
                        err = dsa_slave_host_vlan_del(dev, obj);
                break;
        case SWITCHDEV_OBJ_ID_MRP:
                if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
                        return -EOPNOTSUPP;

                err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
                break;
        case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
                if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
                        return -EOPNOTSUPP;

                err = dsa_port_mrp_del_ring_role(dp,
                                                 SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
                                                     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
        struct dsa_slave_priv *p = netdev_priv(dev);

        return netpoll_send_skb(p->netpoll, skb);
#else
        BUG();
        return NETDEV_TX_OK;
#endif
}

static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
                                 struct sk_buff *skb)
{
        struct dsa_switch *ds = p->dp->ds;

        if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
                return;

        if (!ds->ops->port_txtstamp)
                return;

        ds->ops->port_txtstamp(ds, p->dp->index, skb);
}

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
        /* SKBs for netpoll still need to be mangled with the protocol-specific
         * tag to be successfully transmitted
         */
        if (unlikely(netpoll_tx_running(dev)))
                return dsa_slave_netpoll_send_skb(dev, skb);

        /* Queue the SKB for transmission on the parent interface, but
         * do not modify its EtherType
         */
        skb->dev = dsa_slave_to_master(dev);
        dev_queue_xmit(skb);

        return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);

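/* Make sure the skb has enough headroom and tailroom for this tree's
 * tagging protocol, reallocating only when strictly necessary.
 */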
static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
        int needed_headroom = dev->needed_headroom;
        int needed_tailroom = dev->needed_tailroom;

        /* For tail taggers, we need to pad short frames ourselves, to ensure
         * that the tail tag does not fail at its role of being at the end of
         * the packet, once the master interface pads the frame. Account for
         * that pad length here, and pad later.
         */
        if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
                needed_tailroom += ETH_ZLEN - skb->len;
        /* skb_headroom() returns unsigned int... */
        needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
        needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);

        if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
                /* No reallocation needed, yay! */
                return 0;

        return pskb_expand_head(skb, needed_headroom, needed_tailroom,
                                GFP_ATOMIC);
}

static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct sk_buff *nskb;

        dev_sw_netstats_tx_add(dev, 1, skb->len);

        memset(skb->cb, 0, sizeof(skb->cb));

        /* Handle tx timestamp if any */
        dsa_skb_tx_timestamp(p, skb);

        if (dsa_realloc_skb(skb, dev)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* needed_tailroom should still be 'warm' in the cache line from
         * dsa_realloc_skb(), which has also ensured that padding is safe.
         */
        if (dev->needed_tailroom)
                eth_skb_pad(skb);

        /* Transmit function may have to reallocate the original SKB,
         * in which case it must have freed it. Only free it here on error.
         */
        nskb = p->xmit(skb, dev);
        if (!nskb) {
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        return dsa_enqueue_skb(nskb, dev);
}

/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *drvinfo)
{
        strscpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
        strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->ops->get_regs_len)
                return ds->ops->get_regs_len(ds, dp->index);

        return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->ops->get_regs)
                ds->ops->get_regs(ds, dp->index, regs, _p);
}

static int dsa_slave_nway_reset(struct net_device *dev)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);

        return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->cd && ds->cd->eeprom_len)
                return ds->cd->eeprom_len;

        if (ds->ops->get_eeprom_len)
                return ds->ops->get_eeprom_len(ds);

        return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
                                struct ethtool_eeprom *eeprom, u8 *data)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->ops->get_eeprom)
                return ds->ops->get_eeprom(ds, eeprom, data);

        return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
                                struct ethtool_eeprom *eeprom, u8 *data)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->ops->set_eeprom)
                return ds->ops->set_eeprom(ds, eeprom, data);

        return -EOPNOTSUPP;
}

static void dsa_slave_get_strings(struct net_device *dev,
                                  uint32_t stringset, uint8_t *data)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (stringset == ETH_SS_STATS) {
                int len = ETH_GSTRING_LEN;

                strncpy(data, "tx_packets", len);
                strncpy(data + len, "tx_bytes", len);
                strncpy(data + 2 * len, "rx_packets", len);
                strncpy(data + 3 * len, "rx_bytes", len);
                if (ds->ops->get_strings)
                        ds->ops->get_strings(ds, dp->index, stringset,
                                             data + 4 * len);
        } else if (stringset == ETH_SS_TEST) {
                net_selftest_get_strings(data);
        }
}

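/* The first four u64 slots are the software counters kept in dev->tstats;
 * any driver-specific hardware counters follow at data + 4, matching the
 * string layout produced by dsa_slave_get_strings().
 */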
static void dsa_slave_get_ethtool_stats(struct net_device *dev,
                                        struct ethtool_stats *stats,
                                        uint64_t *data)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;
        struct pcpu_sw_netstats *s;
        unsigned int start;
        int i;

        for_each_possible_cpu(i) {
                u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

                s = per_cpu_ptr(dev->tstats, i);
                do {
                        start = u64_stats_fetch_begin(&s->syncp);
                        tx_packets = u64_stats_read(&s->tx_packets);
                        tx_bytes = u64_stats_read(&s->tx_bytes);
                        rx_packets = u64_stats_read(&s->rx_packets);
                        rx_bytes = u64_stats_read(&s->rx_bytes);
                } while (u64_stats_fetch_retry(&s->syncp, start));
                data[0] += tx_packets;
                data[1] += tx_bytes;
                data[2] += rx_packets;
                data[3] += rx_bytes;
        }
        if (ds->ops->get_ethtool_stats)
                ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (sset == ETH_SS_STATS) {
                int count = 0;

                if (ds->ops->get_sset_count) {
                        count = ds->ops->get_sset_count(ds, dp->index, sset);
                        if (count < 0)
                                return count;
                }

                return count + 4;
        } else if (sset == ETH_SS_TEST) {
                return net_selftest_get_count();
        }

        return -EOPNOTSUPP;
}

static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
                                        struct ethtool_eth_phy_stats *phy_stats)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->ops->get_eth_phy_stats)
                ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
}

static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
                                        struct ethtool_eth_mac_stats *mac_stats)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->ops->get_eth_mac_stats)
                ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats);
}

static void
dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
                             struct ethtool_eth_ctrl_stats *ctrl_stats)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->ops->get_eth_ctrl_stats)
                ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats);
}

static void
dsa_slave_get_rmon_stats(struct net_device *dev,
                         struct ethtool_rmon_stats *rmon_stats,
                         const struct ethtool_rmon_hist_range **ranges)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->ops->get_rmon_stats)
                ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
}

static void dsa_slave_net_selftest(struct net_device *ndev,
                                   struct ethtool_test *etest, u64 *buf)
{
        struct dsa_port *dp = dsa_slave_to_port(ndev);
        struct dsa_switch *ds = dp->ds;

        if (ds->ops->self_test) {
                ds->ops->self_test(ds, dp->index, etest, buf);
                return;
        }

        net_selftest(ndev, etest, buf);
}

static int dsa_slave_get_mm(struct net_device *dev,
                            struct ethtool_mm_state *state)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->get_mm)
                return -EOPNOTSUPP;

        return ds->ops->get_mm(ds, dp->index, state);
}

static int dsa_slave_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg,
                            struct netlink_ext_ack *extack)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->set_mm)
                return -EOPNOTSUPP;

        return ds->ops->set_mm(ds, dp->index, cfg, extack);
}

static void dsa_slave_get_mm_stats(struct net_device *dev,
                                   struct ethtool_mm_stats *stats)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->ops->get_mm_stats)
                ds->ops->get_mm_stats(ds, dp->index, stats);
}

static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        phylink_ethtool_get_wol(dp->pl, w);

        if (ds->ops->get_wol)
                ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;
        int ret = -EOPNOTSUPP;

        phylink_ethtool_set_wol(dp->pl, w);

        if (ds->ops->set_wol)
                ret = ds->ops->set_wol(ds, dp->index, w);

        return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;
        int ret;

        /* Port's PHY and MAC both need to be EEE capable */
        if (!dev->phydev || !dp->pl)
                return -ENODEV;

        if (!ds->ops->set_mac_eee)
                return -EOPNOTSUPP;

        ret = ds->ops->set_mac_eee(ds, dp->index, e);
        if (ret)
                return ret;

        return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;
        int ret;

        /* Port's PHY and MAC both need to be EEE capable */
        if (!dev->phydev || !dp->pl)
                return -ENODEV;

        if (!ds->ops->get_mac_eee)
                return -EOPNOTSUPP;

        ret = ds->ops->get_mac_eee(ds, dp->index, e);
        if (ret)
                return ret;

        return phylink_ethtool_get_eee(dp->pl, e);
}

static int dsa_slave_get_link_ksettings(struct net_device *dev,
                                        struct ethtool_link_ksettings *cmd)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);

        return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_slave_set_link_ksettings(struct net_device *dev,
                                        const struct ethtool_link_ksettings *cmd)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);

        return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

static void dsa_slave_get_pause_stats(struct net_device *dev,
                                      struct ethtool_pause_stats *pause_stats)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->ops->get_pause_stats)
                ds->ops->get_pause_stats(ds, dp->index, pause_stats);
}

static void dsa_slave_get_pauseparam(struct net_device *dev,
                                     struct ethtool_pauseparam *pause)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);

        phylink_ethtool_get_pauseparam(dp->pl, pause);
}

static int dsa_slave_set_pauseparam(struct net_device *dev,
                                    struct ethtool_pauseparam *pause)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);

        return phylink_ethtool_set_pauseparam(dp->pl, pause);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
                                   struct netpoll_info *ni)
{
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct netpoll *netpoll;
        int err = 0;

        netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
        if (!netpoll)
                return -ENOMEM;

        err = __netpoll_setup(netpoll, master);
        if (err) {
                kfree(netpoll);
                goto out;
        }

        p->netpoll = netpoll;
out:
        return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct netpoll *netpoll = p->netpoll;

        if (!netpoll)
                return;

        p->netpoll = NULL;

        __netpoll_free(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif

static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct dsa_mall_tc_entry *mall_tc_entry;

        list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
                if (mall_tc_entry->cookie == cookie)
                        return mall_tc_entry;

        return NULL;
}

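/* Offload a matchall classifier whose single action is mirred, i.e. mirror
 * the traffic of this port to another DSA port.
 */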
1377 static int
1378 dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
1379                                   struct tc_cls_matchall_offload *cls,
1380                                   bool ingress)
1381 {
1382         struct netlink_ext_ack *extack = cls->common.extack;
1383         struct dsa_port *dp = dsa_slave_to_port(dev);
1384         struct dsa_slave_priv *p = netdev_priv(dev);
1385         struct dsa_mall_mirror_tc_entry *mirror;
1386         struct dsa_mall_tc_entry *mall_tc_entry;
1387         struct dsa_switch *ds = dp->ds;
1388         struct flow_action_entry *act;
1389         struct dsa_port *to_dp;
1390         int err;
1391
1392         if (!ds->ops->port_mirror_add)
1393                 return -EOPNOTSUPP;
1394
1395         if (!flow_action_basic_hw_stats_check(&cls->rule->action,
1396                                               cls->common.extack))
1397                 return -EOPNOTSUPP;
1398
1399         act = &cls->rule->action.entries[0];
1400
1401         if (!act->dev)
1402                 return -EINVAL;
1403
1404         if (!dsa_slave_dev_check(act->dev))
1405                 return -EOPNOTSUPP;
1406
1407         mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1408         if (!mall_tc_entry)
1409                 return -ENOMEM;
1410
1411         mall_tc_entry->cookie = cls->cookie;
1412         mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
1413         mirror = &mall_tc_entry->mirror;
1414
1415         to_dp = dsa_slave_to_port(act->dev);
1416
1417         mirror->to_local_port = to_dp->index;
1418         mirror->ingress = ingress;
1419
1420         err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack);
1421         if (err) {
1422                 kfree(mall_tc_entry);
1423                 return err;
1424         }
1425
1426         list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
1427
1428         return err;
1429 }
1430
1431 static int
1432 dsa_slave_add_cls_matchall_police(struct net_device *dev,
1433                                   struct tc_cls_matchall_offload *cls,
1434                                   bool ingress)
1435 {
1436         struct netlink_ext_ack *extack = cls->common.extack;
1437         struct dsa_port *dp = dsa_slave_to_port(dev);
1438         struct dsa_slave_priv *p = netdev_priv(dev);
1439         struct dsa_mall_policer_tc_entry *policer;
1440         struct dsa_mall_tc_entry *mall_tc_entry;
1441         struct dsa_switch *ds = dp->ds;
1442         struct flow_action_entry *act;
1443         int err;
1444
1445         if (!ds->ops->port_policer_add) {
1446                 NL_SET_ERR_MSG_MOD(extack,
1447                                    "Policing offload not implemented");
1448                 return -EOPNOTSUPP;
1449         }
1450
1451         if (!ingress) {
1452                 NL_SET_ERR_MSG_MOD(extack,
1453                                    "Only supported on ingress qdisc");
1454                 return -EOPNOTSUPP;
1455         }
1456
1457         if (!flow_action_basic_hw_stats_check(&cls->rule->action,
1458                                               cls->common.extack))
1459                 return -EOPNOTSUPP;
1460
1461         list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
1462                 if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
1463                         NL_SET_ERR_MSG_MOD(extack,
1464                                            "Only one port policer allowed");
1465                         return -EEXIST;
1466                 }
1467         }
1468
1469         act = &cls->rule->action.entries[0];
1470
1471         mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1472         if (!mall_tc_entry)
1473                 return -ENOMEM;
1474
1475         mall_tc_entry->cookie = cls->cookie;
1476         mall_tc_entry->type = DSA_PORT_MALL_POLICER;
1477         policer = &mall_tc_entry->policer;
1478         policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
1479         policer->burst = act->police.burst;
1480
1481         err = ds->ops->port_policer_add(ds, dp->index, policer);
1482         if (err) {
1483                 kfree(mall_tc_entry);
1484                 return err;
1485         }
1486
1487         list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
1488
1489         return err;
1490 }
1491
1492 static int dsa_slave_add_cls_matchall(struct net_device *dev,
1493                                       struct tc_cls_matchall_offload *cls,
1494                                       bool ingress)
1495 {
1496         int err = -EOPNOTSUPP;
1497
1498         if (cls->common.protocol == htons(ETH_P_ALL) &&
1499             flow_offload_has_one_action(&cls->rule->action) &&
1500             cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
1501                 err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
1502         else if (flow_offload_has_one_action(&cls->rule->action) &&
1503                  cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
1504                 err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);
1505
1506         return err;
1507 }
1508
1509 static void dsa_slave_del_cls_matchall(struct net_device *dev,
1510                                        struct tc_cls_matchall_offload *cls)
1511 {
1512         struct dsa_port *dp = dsa_slave_to_port(dev);
1513         struct dsa_mall_tc_entry *mall_tc_entry;
1514         struct dsa_switch *ds = dp->ds;
1515
1516         mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
1517         if (!mall_tc_entry)
1518                 return;
1519
1520         list_del(&mall_tc_entry->list);
1521
1522         switch (mall_tc_entry->type) {
1523         case DSA_PORT_MALL_MIRROR:
1524                 if (ds->ops->port_mirror_del)
1525                         ds->ops->port_mirror_del(ds, dp->index,
1526                                                  &mall_tc_entry->mirror);
1527                 break;
1528         case DSA_PORT_MALL_POLICER:
1529                 if (ds->ops->port_policer_del)
1530                         ds->ops->port_policer_del(ds, dp->index);
1531                 break;
1532         default:
1533                 WARN_ON(1);
1534         }
1535
1536         kfree(mall_tc_entry);
1537 }
1538
1539 static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
1540                                            struct tc_cls_matchall_offload *cls,
1541                                            bool ingress)
1542 {
1543         if (cls->common.chain_index)
1544                 return -EOPNOTSUPP;
1545
1546         switch (cls->command) {
1547         case TC_CLSMATCHALL_REPLACE:
1548                 return dsa_slave_add_cls_matchall(dev, cls, ingress);
1549         case TC_CLSMATCHALL_DESTROY:
1550                 dsa_slave_del_cls_matchall(dev, cls);
1551                 return 0;
1552         default:
1553                 return -EOPNOTSUPP;
1554         }
1555 }
1556
1557 static int dsa_slave_add_cls_flower(struct net_device *dev,
1558                                     struct flow_cls_offload *cls,
1559                                     bool ingress)
1560 {
1561         struct dsa_port *dp = dsa_slave_to_port(dev);
1562         struct dsa_switch *ds = dp->ds;
1563         int port = dp->index;
1564
1565         if (!ds->ops->cls_flower_add)
1566                 return -EOPNOTSUPP;
1567
1568         return ds->ops->cls_flower_add(ds, port, cls, ingress);
1569 }
1570
1571 static int dsa_slave_del_cls_flower(struct net_device *dev,
1572                                     struct flow_cls_offload *cls,
1573                                     bool ingress)
1574 {
1575         struct dsa_port *dp = dsa_slave_to_port(dev);
1576         struct dsa_switch *ds = dp->ds;
1577         int port = dp->index;
1578
1579         if (!ds->ops->cls_flower_del)
1580                 return -EOPNOTSUPP;
1581
1582         return ds->ops->cls_flower_del(ds, port, cls, ingress);
1583 }
1584
1585 static int dsa_slave_stats_cls_flower(struct net_device *dev,
1586                                       struct flow_cls_offload *cls,
1587                                       bool ingress)
1588 {
1589         struct dsa_port *dp = dsa_slave_to_port(dev);
1590         struct dsa_switch *ds = dp->ds;
1591         int port = dp->index;
1592
1593         if (!ds->ops->cls_flower_stats)
1594                 return -EOPNOTSUPP;
1595
1596         return ds->ops->cls_flower_stats(ds, port, cls, ingress);
1597 }
1598
1599 static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
1600                                          struct flow_cls_offload *cls,
1601                                          bool ingress)
1602 {
1603         switch (cls->command) {
1604         case FLOW_CLS_REPLACE:
1605                 return dsa_slave_add_cls_flower(dev, cls, ingress);
1606         case FLOW_CLS_DESTROY:
1607                 return dsa_slave_del_cls_flower(dev, cls, ingress);
1608         case FLOW_CLS_STATS:
1609                 return dsa_slave_stats_cls_flower(dev, cls, ingress);
1610         default:
1611                 return -EOPNOTSUPP;
1612         }
1613 }
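
/* Illustrative driver-side sketch (assumed "foo" names, not part of this
 * file): a switch driver hooks into the flower pass-through above by
 * implementing something along the lines of:
 *
 *	static int foo_cls_flower_add(struct dsa_switch *ds, int port,
 *				      struct flow_cls_offload *cls,
 *				      bool ingress)
 *	{
 *		struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 *		struct flow_match_eth_addrs match;
 *
 *		if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS))
 *			return -EOPNOTSUPP;
 *
 *		flow_rule_match_eth_addrs(rule, &match);
 *		// ... program a TCAM entry from match.key/match.mask ...
 *		return 0;
 *	}
 */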
1614
1615 static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1616                                        void *cb_priv, bool ingress)
1617 {
1618         struct net_device *dev = cb_priv;
1619
1620         if (!tc_can_offload(dev))
1621                 return -EOPNOTSUPP;
1622
1623         switch (type) {
1624         case TC_SETUP_CLSMATCHALL:
1625                 return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
1626         case TC_SETUP_CLSFLOWER:
1627                 return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
1628         default:
1629                 return -EOPNOTSUPP;
1630         }
1631 }
1632
1633 static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
1634                                           void *type_data, void *cb_priv)
1635 {
1636         return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
1637 }
1638
1639 static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
1640                                           void *type_data, void *cb_priv)
1641 {
1642         return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
1643 }
1644
1645 static LIST_HEAD(dsa_slave_block_cb_list);
1646
1647 static int dsa_slave_setup_tc_block(struct net_device *dev,
1648                                     struct flow_block_offload *f)
1649 {
1650         struct flow_block_cb *block_cb;
1651         flow_setup_cb_t *cb;
1652
1653         if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1654                 cb = dsa_slave_setup_tc_block_cb_ig;
1655         else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1656                 cb = dsa_slave_setup_tc_block_cb_eg;
1657         else
1658                 return -EOPNOTSUPP;
1659
1660         f->driver_block_list = &dsa_slave_block_cb_list;
1661
1662         switch (f->command) {
1663         case FLOW_BLOCK_BIND:
1664                 if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
1665                         return -EBUSY;
1666
1667                 block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
1668                 if (IS_ERR(block_cb))
1669                         return PTR_ERR(block_cb);
1670
1671                 flow_block_cb_add(block_cb, f);
1672                 list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
1673                 return 0;
1674         case FLOW_BLOCK_UNBIND:
1675                 block_cb = flow_block_cb_lookup(f->block, cb, dev);
1676                 if (!block_cb)
1677                         return -ENOENT;
1678
1679                 flow_block_cb_remove(block_cb, f);
1680                 list_del(&block_cb->driver_list);
1681                 return 0;
1682         default:
1683                 return -EOPNOTSUPP;
1684         }
1685 }
1686
1687 static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
1688                                     void *type_data)
1689 {
1690         struct net_device *master = dsa_port_to_master(dsa_to_port(ds, port));
1691
1692         if (!master->netdev_ops->ndo_setup_tc)
1693                 return -EOPNOTSUPP;
1694
1695         return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
1696 }
1697
1698 static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
1699                               void *type_data)
1700 {
1701         struct dsa_port *dp = dsa_slave_to_port(dev);
1702         struct dsa_switch *ds = dp->ds;
1703
1704         switch (type) {
1705         case TC_SETUP_BLOCK:
1706                 return dsa_slave_setup_tc_block(dev, type_data);
1707         case TC_SETUP_FT:
1708                 return dsa_slave_setup_ft_block(ds, dp->index, type_data);
1709         default:
1710                 break;
1711         }
1712
1713         if (!ds->ops->port_setup_tc)
1714                 return -EOPNOTSUPP;
1715
1716         return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
1717 }
1718
1719 static int dsa_slave_get_rxnfc(struct net_device *dev,
1720                                struct ethtool_rxnfc *nfc, u32 *rule_locs)
1721 {
1722         struct dsa_port *dp = dsa_slave_to_port(dev);
1723         struct dsa_switch *ds = dp->ds;
1724
1725         if (!ds->ops->get_rxnfc)
1726                 return -EOPNOTSUPP;
1727
1728         return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
1729 }
1730
1731 static int dsa_slave_set_rxnfc(struct net_device *dev,
1732                                struct ethtool_rxnfc *nfc)
1733 {
1734         struct dsa_port *dp = dsa_slave_to_port(dev);
1735         struct dsa_switch *ds = dp->ds;
1736
1737         if (!ds->ops->set_rxnfc)
1738                 return -EOPNOTSUPP;
1739
1740         return ds->ops->set_rxnfc(ds, dp->index, nfc);
1741 }
1742
1743 static int dsa_slave_get_ts_info(struct net_device *dev,
1744                                  struct ethtool_ts_info *ts)
1745 {
1746         struct dsa_slave_priv *p = netdev_priv(dev);
1747         struct dsa_switch *ds = p->dp->ds;
1748
1749         if (!ds->ops->get_ts_info)
1750                 return -EOPNOTSUPP;
1751
1752         return ds->ops->get_ts_info(ds, p->dp->index, ts);
1753 }
1754
1755 static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
1756                                      u16 vid)
1757 {
1758         struct dsa_port *dp = dsa_slave_to_port(dev);
1759         struct switchdev_obj_port_vlan vlan = {
1760                 .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
1761                 .vid = vid,
1762                 /* This API only allows programming tagged, non-PVID VIDs */
1763                 .flags = 0,
1764         };
1765         struct netlink_ext_ack extack = {0};
1766         struct dsa_switch *ds = dp->ds;
1767         struct netdev_hw_addr *ha;
1768         struct dsa_vlan *v;
1769         int ret;
1770
1771         /* User port... */
1772         ret = dsa_port_vlan_add(dp, &vlan, &extack);
1773         if (ret) {
1774                 if (extack._msg)
1775                         netdev_err(dev, "%s\n", extack._msg);
1776                 return ret;
1777         }
1778
1779         /* And CPU port... */
1780         ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
1781         if (ret) {
1782                 if (extack._msg)
1783                         netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
1784                                    extack._msg);
1785                 return ret;
1786         }
1787
1788         if (!dsa_switch_supports_uc_filtering(ds) &&
1789             !dsa_switch_supports_mc_filtering(ds))
1790                 return 0;
1791
1792         v = kzalloc(sizeof(*v), GFP_KERNEL);
1793         if (!v) {
1794                 ret = -ENOMEM;
1795                 goto rollback;
1796         }
1797
1798         netif_addr_lock_bh(dev);
1799
1800         v->vid = vid;
1801         list_add_tail(&v->list, &dp->user_vlans);
1802
1803         if (dsa_switch_supports_mc_filtering(ds)) {
1804                 netdev_for_each_synced_mc_addr(ha, dev) {
1805                         dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD,
1806                                                            ha->addr, vid);
1807                 }
1808         }
1809
1810         if (dsa_switch_supports_uc_filtering(ds)) {
1811                 netdev_for_each_synced_uc_addr(ha, dev) {
1812                         dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD,
1813                                                            ha->addr, vid);
1814                 }
1815         }
1816
1817         netif_addr_unlock_bh(dev);
1818
1819         dsa_flush_workqueue();
1820
1821         return 0;
1822
1823 rollback:
1824         dsa_port_host_vlan_del(dp, &vlan);
1825         dsa_port_vlan_del(dp, &vlan);
1826
1827         return ret;
1828 }
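
/* Usage sketch (illustrative): .ndo_vlan_rx_add_vid above runs when an
 * 8021q upper is created on a VLAN-aware user port, e.g.:
 *
 *   ip link add link swp0 name swp0.100 type vlan id 100
 *
 * VID 100 is then installed on both the user port and the CPU port, and
 * the already-synced unicast/multicast addresses are replayed on VID 100
 * so that host termination keeps working in that VLAN.
 */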
1829
1830 static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
1831                                       u16 vid)
1832 {
1833         struct dsa_port *dp = dsa_slave_to_port(dev);
1834         struct switchdev_obj_port_vlan vlan = {
1835                 .vid = vid,
1836                 /* This API only allows programming tagged, non-PVID VIDs */
1837                 .flags = 0,
1838         };
1839         struct dsa_switch *ds = dp->ds;
1840         struct netdev_hw_addr *ha;
1841         struct dsa_vlan *v;
1842         int err;
1843
1844         err = dsa_port_vlan_del(dp, &vlan);
1845         if (err)
1846                 return err;
1847
1848         err = dsa_port_host_vlan_del(dp, &vlan);
1849         if (err)
1850                 return err;
1851
1852         if (!dsa_switch_supports_uc_filtering(ds) &&
1853             !dsa_switch_supports_mc_filtering(ds))
1854                 return 0;
1855
1856         netif_addr_lock_bh(dev);
1857
1858         v = dsa_vlan_find(&dp->user_vlans, &vlan);
1859         if (!v) {
1860                 netif_addr_unlock_bh(dev);
1861                 return -ENOENT;
1862         }
1863
1864         list_del(&v->list);
1865         kfree(v);
1866
1867         if (dsa_switch_supports_mc_filtering(ds)) {
1868                 netdev_for_each_synced_mc_addr(ha, dev) {
1869                         dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL,
1870                                                            ha->addr, vid);
1871                 }
1872         }
1873
1874         if (dsa_switch_supports_uc_filtering(ds)) {
1875                 netdev_for_each_synced_uc_addr(ha, dev) {
1876                         dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL,
1877                                                            ha->addr, vid);
1878                 }
1879         }
1880
1881         netif_addr_unlock_bh(dev);
1882
1883         dsa_flush_workqueue();
1884
1885         return 0;
1886 }
1887
1888 static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
1889 {
1890         __be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
1891
1892         return dsa_slave_vlan_rx_add_vid(arg, proto, vid);
1893 }
1894
1895 static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
1896 {
1897         __be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
1898
1899         return dsa_slave_vlan_rx_kill_vid(arg, proto, vid);
1900 }
1901
1902 /* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
1903  * filtering is enabled. The baseline is that only ports that offload a
1904  * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware,
1905  * but there are exceptions for quirky hardware.
1906  *
1907  * If ds->vlan_filtering_is_global = true, then standalone ports which share
1908  * the same switch with other ports that offload a VLAN-aware bridge are also
1909  * inevitably VLAN-aware.
1910  *
1911  * To summarize, a DSA switch port offloads:
1912  *
1913  * - If standalone (this includes software bridge, software LAG):
1914  *     - if ds->needs_standalone_vlan_filtering = true, OR if
1915  *       (ds->vlan_filtering_is_global = true AND there are bridges spanning
1916  *       this switch chip which have vlan_filtering=1)
1917  *         - the 8021q upper VLANs
1918  *     - else (standalone VLAN filtering is not needed, VLAN filtering is not
1919  *       global, or it is, but no port is under a VLAN-aware bridge):
1920  *         - no VLAN (any 8021q upper is a software VLAN)
1921  *
1922  * - If under a vlan_filtering=0 bridge which it offloads:
1923  *     - if ds->configure_vlan_while_not_filtering = true (default):
1924  *         - the bridge VLANs. These VLANs are committed to hardware but inactive.
1925  *     - else (deprecated):
1926  *         - no VLAN. The bridge VLANs are not restored when VLAN awareness is
1927  *           enabled, so this behavior is broken and discouraged.
1928  *
1929  * - If under a vlan_filtering=1 bridge which it offloads:
1930  *     - the bridge VLANs
1931  *     - the 8021q upper VLANs
1932  */
1933 int dsa_slave_manage_vlan_filtering(struct net_device *slave,
1934                                     bool vlan_filtering)
1935 {
1936         int err;
1937
1938         if (vlan_filtering) {
1939                 slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1940
1941                 err = vlan_for_each(slave, dsa_slave_restore_vlan, slave);
1942                 if (err) {
1943                         vlan_for_each(slave, dsa_slave_clear_vlan, slave);
1944                         slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1945                         return err;
1946                 }
1947         } else {
1948                 err = vlan_for_each(slave, dsa_slave_clear_vlan, slave);
1949                 if (err)
1950                         return err;
1951
1952                 slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1953         }
1954
1955         return 0;
1956 }
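
/* Usage sketch (illustrative): the transition handled above is triggered
 * e.g. by toggling VLAN awareness on an offloaded bridge:
 *
 *   ip link set br0 type bridge vlan_filtering 1
 *
 * at which point the 8021q upper VLANs of each slave are committed to
 * hardware and NETIF_F_HW_VLAN_CTAG_FILTER is set, so that subsequent
 * VLAN uppers go through the rx_add_vid/rx_kill_vid paths.
 */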
1957
1958 struct dsa_hw_port {
1959         struct list_head list;
1960         struct net_device *dev;
1961         int old_mtu;
1962 };
1963
1964 static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
1965 {
1966         const struct dsa_hw_port *p;
1967         int err;
1968
1969         list_for_each_entry(p, hw_port_list, list) {
1970                 if (p->dev->mtu == mtu)
1971                         continue;
1972
1973                 err = dev_set_mtu(p->dev, mtu);
1974                 if (err)
1975                         goto rollback;
1976         }
1977
1978         return 0;
1979
1980 rollback:
1981         list_for_each_entry_continue_reverse(p, hw_port_list, list) {
1982                 if (p->dev->mtu == p->old_mtu)
1983                         continue;
1984
1985                 if (dev_set_mtu(p->dev, p->old_mtu))
1986                         netdev_err(p->dev, "Failed to restore MTU\n");
1987         }
1988
1989         return err;
1990 }
1991
1992 static void dsa_hw_port_list_free(struct list_head *hw_port_list)
1993 {
1994         struct dsa_hw_port *p, *n;
1995
1996         list_for_each_entry_safe(p, n, hw_port_list, list)
1997                 kfree(p);
1998 }
1999
2000 /* Make the hardware datapath to/from @dev limited to a common MTU */
2001 static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
2002 {
2003         struct list_head hw_port_list;
2004         struct dsa_switch_tree *dst;
2005         int min_mtu = ETH_MAX_MTU;
2006         struct dsa_port *other_dp;
2007         int err;
2008
2009         if (!dp->ds->mtu_enforcement_ingress)
2010                 return;
2011
2012         if (!dp->bridge)
2013                 return;
2014
2015         INIT_LIST_HEAD(&hw_port_list);
2016
2017         /* Populate the list of ports that are part of the same bridge
2018          * as the newly added/modified port
2019          */
2020         list_for_each_entry(dst, &dsa_tree_list, list) {
2021                 list_for_each_entry(other_dp, &dst->ports, list) {
2022                         struct dsa_hw_port *hw_port;
2023                         struct net_device *slave;
2024
2025                         if (other_dp->type != DSA_PORT_TYPE_USER)
2026                                 continue;
2027
2028                         if (!dsa_port_bridge_same(dp, other_dp))
2029                                 continue;
2030
2031                         if (!other_dp->ds->mtu_enforcement_ingress)
2032                                 continue;
2033
2034                         slave = other_dp->slave;
2035
2036                         if (min_mtu > slave->mtu)
2037                                 min_mtu = slave->mtu;
2038
2039                         hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
2040                         if (!hw_port)
2041                                 goto out;
2042
2043                         hw_port->dev = slave;
2044                         hw_port->old_mtu = slave->mtu;
2045
2046                         list_add(&hw_port->list, &hw_port_list);
2047                 }
2048         }
2049
2050         /* Attempt to configure the entire hardware bridge to the newly added
2051          * interface's MTU first, regardless of whether the intention of the
2052          * user was to raise or lower it.
2053          */
2054         err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
2055         if (!err)
2056                 goto out;
2057
2058         /* Clearly that didn't work out so well, so just set the minimum MTU on
2059          * all hardware bridge ports now. If this fails too, then all ports will
2060          * still have their old MTU rolled back anyway.
2061          */
2062         dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);
2063
2064 out:
2065         dsa_hw_port_list_free(&hw_port_list);
2066 }
2067
2068 int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
2069 {
2070         struct net_device *master = dsa_slave_to_master(dev);
2071         struct dsa_port *dp = dsa_slave_to_port(dev);
2072         struct dsa_port *cpu_dp = dp->cpu_dp;
2073         struct dsa_switch *ds = dp->ds;
2074         struct dsa_port *other_dp;
2075         int largest_mtu = 0;
2076         int new_master_mtu;
2077         int old_master_mtu;
2078         int mtu_limit;
2079         int overhead;
2080         int cpu_mtu;
2081         int err;
2082
2083         if (!ds->ops->port_change_mtu)
2084                 return -EOPNOTSUPP;
2085
2086         dsa_tree_for_each_user_port(other_dp, ds->dst) {
2087                 int slave_mtu;
2088
2089                 /* During probe, this function will be called for each slave
2090                  * device, while not all of them have been allocated. That's
2091                  * ok, it doesn't change what the maximum is, so ignore it.
2092                  */
2093                 if (!other_dp->slave)
2094                         continue;
2095
2096                 /* Pretend that we already applied the setting, which we
2097                  * actually haven't yet (the integrity checks are still to come)
2098                  */
2099                 if (dp == other_dp)
2100                         slave_mtu = new_mtu;
2101                 else
2102                         slave_mtu = other_dp->slave->mtu;
2103
2104                 if (largest_mtu < slave_mtu)
2105                         largest_mtu = slave_mtu;
2106         }
2107
2108         overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
2109         mtu_limit = min_t(int, master->max_mtu, dev->max_mtu + overhead);
2110         old_master_mtu = master->mtu;
2111         new_master_mtu = largest_mtu + overhead;
2112         if (new_master_mtu > mtu_limit)
2113                 return -ERANGE;
2114
2115         /* If the master MTU isn't over limit, there's no need to check the CPU
2116          * MTU, since that surely isn't either.
2117          */
2118         cpu_mtu = largest_mtu;
2119
2120         /* Start applying stuff */
2121         if (new_master_mtu != old_master_mtu) {
2122                 err = dev_set_mtu(master, new_master_mtu);
2123                 if (err < 0)
2124                         goto out_master_failed;
2125
2126                 /* We only need to propagate the MTU of the CPU port to
2127                  * upstream switches, so emit a notifier which updates them.
2128                  */
2129                 err = dsa_port_mtu_change(cpu_dp, cpu_mtu);
2130                 if (err)
2131                         goto out_cpu_failed;
2132         }
2133
2134         err = ds->ops->port_change_mtu(ds, dp->index, new_mtu);
2135         if (err)
2136                 goto out_port_failed;
2137
2138         dev->mtu = new_mtu;
2139
2140         dsa_bridge_mtu_normalization(dp);
2141
2142         return 0;
2143
2144 out_port_failed:
2145         if (new_master_mtu != old_master_mtu)
2146                 dsa_port_mtu_change(cpu_dp, old_master_mtu - overhead);
2147 out_cpu_failed:
2148         if (new_master_mtu != old_master_mtu)
2149                 dev_set_mtu(master, old_master_mtu);
2150 out_master_failed:
2151         return err;
2152 }
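
/* Worked example (illustrative numbers): assume a tagging protocol with
 * an 8-byte overhead and a request of
 *
 *   ip link set swp0 mtu 9000
 *
 * If 9000 is the largest user port MTU in the tree, new_master_mtu
 * becomes 9000 + 8 = 9008, and the change is accepted only if 9008 fits
 * within min(master->max_mtu, dev->max_mtu + overhead); otherwise
 * -ERANGE is returned before anything is touched.
 */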
2153
2154 static int __maybe_unused
2155 dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
2156 {
2157         struct dsa_port *dp = dsa_slave_to_port(dev);
2158         struct dsa_switch *ds = dp->ds;
2159         unsigned long mask, new_prio;
2160         int err, port = dp->index;
2161
2162         if (!ds->ops->port_set_default_prio)
2163                 return -EOPNOTSUPP;
2164
2165         err = dcb_ieee_setapp(dev, app);
2166         if (err)
2167                 return err;
2168
2169         mask = dcb_ieee_getapp_mask(dev, app);
2170         new_prio = __fls(mask);
2171
2172         err = ds->ops->port_set_default_prio(ds, port, new_prio);
2173         if (err) {
2174                 dcb_ieee_delapp(dev, app);
2175                 return err;
2176         }
2177
2178         return 0;
2179 }
2180
2181 static int __maybe_unused
2182 dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
2183 {
2184         struct dsa_port *dp = dsa_slave_to_port(dev);
2185         struct dsa_switch *ds = dp->ds;
2186         unsigned long mask, new_prio;
2187         int err, port = dp->index;
2188         u8 dscp = app->protocol;
2189
2190         if (!ds->ops->port_add_dscp_prio)
2191                 return -EOPNOTSUPP;
2192
2193         if (dscp >= 64) {
2194                 netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
2195                            dscp);
2196                 return -EINVAL;
2197         }
2198
2199         err = dcb_ieee_setapp(dev, app);
2200         if (err)
2201                 return err;
2202
2203         mask = dcb_ieee_getapp_mask(dev, app);
2204         new_prio = __fls(mask);
2205
2206         err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio);
2207         if (err) {
2208                 dcb_ieee_delapp(dev, app);
2209                 return err;
2210         }
2211
2212         return 0;
2213 }
2214
2215 static int __maybe_unused dsa_slave_dcbnl_ieee_setapp(struct net_device *dev,
2216                                                       struct dcb_app *app)
2217 {
2218         switch (app->selector) {
2219         case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
2220                 switch (app->protocol) {
2221                 case 0:
2222                         return dsa_slave_dcbnl_set_default_prio(dev, app);
2223                 default:
2224                         return -EOPNOTSUPP;
2225                 }
2226                 break;
2227         case IEEE_8021QAZ_APP_SEL_DSCP:
2228                 return dsa_slave_dcbnl_add_dscp_prio(dev, app);
2229         default:
2230                 return -EOPNOTSUPP;
2231         }
2232 }
2233
2234 static int __maybe_unused
2235 dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
2236 {
2237         struct dsa_port *dp = dsa_slave_to_port(dev);
2238         struct dsa_switch *ds = dp->ds;
2239         unsigned long mask, new_prio;
2240         int err, port = dp->index;
2241
2242         if (!ds->ops->port_set_default_prio)
2243                 return -EOPNOTSUPP;
2244
2245         err = dcb_ieee_delapp(dev, app);
2246         if (err)
2247                 return err;
2248
2249         mask = dcb_ieee_getapp_mask(dev, app);
2250         new_prio = mask ? __fls(mask) : 0;
2251
2252         err = ds->ops->port_set_default_prio(ds, port, new_prio);
2253         if (err) {
2254                 dcb_ieee_setapp(dev, app);
2255                 return err;
2256         }
2257
2258         return 0;
2259 }
2260
2261 static int __maybe_unused
2262 dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
2263 {
2264         struct dsa_port *dp = dsa_slave_to_port(dev);
2265         struct dsa_switch *ds = dp->ds;
2266         int err, port = dp->index;
2267         u8 dscp = app->protocol;
2268
2269         if (!ds->ops->port_del_dscp_prio)
2270                 return -EOPNOTSUPP;
2271
2272         err = dcb_ieee_delapp(dev, app);
2273         if (err)
2274                 return err;
2275
2276         err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority);
2277         if (err) {
2278                 dcb_ieee_setapp(dev, app);
2279                 return err;
2280         }
2281
2282         return 0;
2283 }
2284
2285 static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev,
2286                                                       struct dcb_app *app)
2287 {
2288         switch (app->selector) {
2289         case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
2290                 switch (app->protocol) {
2291                 case 0:
2292                         return dsa_slave_dcbnl_del_default_prio(dev, app);
2293                 default:
2294                         return -EOPNOTSUPP;
2295                 }
2296                 break;
2297         case IEEE_8021QAZ_APP_SEL_DSCP:
2298                 return dsa_slave_dcbnl_del_dscp_prio(dev, app);
2299         default:
2300                 return -EOPNOTSUPP;
2301         }
2302 }
2303
2304 /* Pre-populate the DCB application priority table with the priorities
2305  * configured during switch setup, which we read from hardware here.
2306  */
2307 static int dsa_slave_dcbnl_init(struct net_device *dev)
2308 {
2309         struct dsa_port *dp = dsa_slave_to_port(dev);
2310         struct dsa_switch *ds = dp->ds;
2311         int port = dp->index;
2312         int err;
2313
2314         if (ds->ops->port_get_default_prio) {
2315                 int prio = ds->ops->port_get_default_prio(ds, port);
2316                 struct dcb_app app = {
2317                         .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
2318                         .protocol = 0,
2319                         .priority = prio,
2320                 };
2321
2322                 if (prio < 0)
2323                         return prio;
2324
2325                 err = dcb_ieee_setapp(dev, &app);
2326                 if (err)
2327                         return err;
2328         }
2329
2330         if (ds->ops->port_get_dscp_prio) {
2331                 int protocol;
2332
2333                 for (protocol = 0; protocol < 64; protocol++) {
2334                         struct dcb_app app = {
2335                                 .selector = IEEE_8021QAZ_APP_SEL_DSCP,
2336                                 .protocol = protocol,
2337                         };
2338                         int prio;
2339
2340                         prio = ds->ops->port_get_dscp_prio(ds, port, protocol);
2341                         if (prio == -EOPNOTSUPP)
2342                                 continue;
2343                         if (prio < 0)
2344                                 return prio;
2345
2346                         app.priority = prio;
2347
2348                         err = dcb_ieee_setapp(dev, &app);
2349                         if (err)
2350                                 return err;
2351                 }
2352         }
2353
2354         return 0;
2355 }
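
/* Usage sketch (illustrative; assumes the iproute2 dcb tool): the dcbnl
 * hooks above are driven by APP table commands such as:
 *
 *   dcb app add dev swp0 default-prio 3
 *   dcb app add dev swp0 dscp-prio 24:6
 *
 * The first installs an ETHERTYPE/0 APP entry (the port-default priority),
 * the second maps DSCP 24 to priority 6; both end up in hardware through
 * ds->ops->port_set_default_prio() and ds->ops->port_add_dscp_prio().
 */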
2356
2357 static const struct ethtool_ops dsa_slave_ethtool_ops = {
2358         .get_drvinfo            = dsa_slave_get_drvinfo,
2359         .get_regs_len           = dsa_slave_get_regs_len,
2360         .get_regs               = dsa_slave_get_regs,
2361         .nway_reset             = dsa_slave_nway_reset,
2362         .get_link               = ethtool_op_get_link,
2363         .get_eeprom_len         = dsa_slave_get_eeprom_len,
2364         .get_eeprom             = dsa_slave_get_eeprom,
2365         .set_eeprom             = dsa_slave_set_eeprom,
2366         .get_strings            = dsa_slave_get_strings,
2367         .get_ethtool_stats      = dsa_slave_get_ethtool_stats,
2368         .get_sset_count         = dsa_slave_get_sset_count,
2369         .get_eth_phy_stats      = dsa_slave_get_eth_phy_stats,
2370         .get_eth_mac_stats      = dsa_slave_get_eth_mac_stats,
2371         .get_eth_ctrl_stats     = dsa_slave_get_eth_ctrl_stats,
2372         .get_rmon_stats         = dsa_slave_get_rmon_stats,
2373         .set_wol                = dsa_slave_set_wol,
2374         .get_wol                = dsa_slave_get_wol,
2375         .set_eee                = dsa_slave_set_eee,
2376         .get_eee                = dsa_slave_get_eee,
2377         .get_link_ksettings     = dsa_slave_get_link_ksettings,
2378         .set_link_ksettings     = dsa_slave_set_link_ksettings,
2379         .get_pause_stats        = dsa_slave_get_pause_stats,
2380         .get_pauseparam         = dsa_slave_get_pauseparam,
2381         .set_pauseparam         = dsa_slave_set_pauseparam,
2382         .get_rxnfc              = dsa_slave_get_rxnfc,
2383         .set_rxnfc              = dsa_slave_set_rxnfc,
2384         .get_ts_info            = dsa_slave_get_ts_info,
2385         .self_test              = dsa_slave_net_selftest,
2386         .get_mm                 = dsa_slave_get_mm,
2387         .set_mm                 = dsa_slave_set_mm,
2388         .get_mm_stats           = dsa_slave_get_mm_stats,
2389 };
2390
2391 static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = {
2392         .ieee_setapp            = dsa_slave_dcbnl_ieee_setapp,
2393         .ieee_delapp            = dsa_slave_dcbnl_ieee_delapp,
2394 };
2395
2396 static void dsa_slave_get_stats64(struct net_device *dev,
2397                                   struct rtnl_link_stats64 *s)
2398 {
2399         struct dsa_port *dp = dsa_slave_to_port(dev);
2400         struct dsa_switch *ds = dp->ds;
2401
2402         if (ds->ops->get_stats64)
2403                 ds->ops->get_stats64(ds, dp->index, s);
2404         else
2405                 dev_get_tstats64(dev, s);
2406 }
2407
2408 static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
2409                                        struct net_device_path *path)
2410 {
2411         struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
2412         struct net_device *master = dsa_port_to_master(dp);
2413         struct dsa_port *cpu_dp = dp->cpu_dp;
2414
2415         path->dev = ctx->dev;
2416         path->type = DEV_PATH_DSA;
2417         path->dsa.proto = cpu_dp->tag_ops->proto;
2418         path->dsa.port = dp->index;
2419         ctx->dev = master;
2420
2421         return 0;
2422 }
2423
2424 static const struct net_device_ops dsa_slave_netdev_ops = {
2425         .ndo_open               = dsa_slave_open,
2426         .ndo_stop               = dsa_slave_close,
2427         .ndo_start_xmit         = dsa_slave_xmit,
2428         .ndo_change_rx_flags    = dsa_slave_change_rx_flags,
2429         .ndo_set_rx_mode        = dsa_slave_set_rx_mode,
2430         .ndo_set_mac_address    = dsa_slave_set_mac_address,
2431         .ndo_fdb_dump           = dsa_slave_fdb_dump,
2432         .ndo_eth_ioctl          = dsa_slave_ioctl,
2433         .ndo_get_iflink         = dsa_slave_get_iflink,
2434 #ifdef CONFIG_NET_POLL_CONTROLLER
2435         .ndo_netpoll_setup      = dsa_slave_netpoll_setup,
2436         .ndo_netpoll_cleanup    = dsa_slave_netpoll_cleanup,
2437         .ndo_poll_controller    = dsa_slave_poll_controller,
2438 #endif
2439         .ndo_setup_tc           = dsa_slave_setup_tc,
2440         .ndo_get_stats64        = dsa_slave_get_stats64,
2441         .ndo_vlan_rx_add_vid    = dsa_slave_vlan_rx_add_vid,
2442         .ndo_vlan_rx_kill_vid   = dsa_slave_vlan_rx_kill_vid,
2443         .ndo_change_mtu         = dsa_slave_change_mtu,
2444         .ndo_fill_forward_path  = dsa_slave_fill_forward_path,
2445 };
2446
2447 static struct device_type dsa_type = {
2448         .name   = "dsa",
2449 };
2450
2451 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
2452 {
2453         const struct dsa_port *dp = dsa_to_port(ds, port);
2454
2455         if (dp->pl)
2456                 phylink_mac_change(dp->pl, up);
2457 }
2458 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
2459
2460 static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
2461                                           struct phylink_link_state *state)
2462 {
2463         struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
2464         struct dsa_switch *ds = dp->ds;
2465
2466         /* No need to check that this operation is valid; the callback would
2467          * not be called if it were not.
2468          */
2469         ds->ops->phylink_fixed_state(ds, dp->index, state);
2470 }
2471
2472 /* slave device setup *******************************************************/
2473 static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr,
2474                                  u32 flags)
2475 {
2476         struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2477         struct dsa_switch *ds = dp->ds;
2478
2479         slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
2480         if (!slave_dev->phydev) {
2481                 netdev_err(slave_dev, "no phy at %d\n", addr);
2482                 return -ENODEV;
2483         }
2484
2485         slave_dev->phydev->dev_flags |= flags;
2486
2487         return phylink_connect_phy(dp->pl, slave_dev->phydev);
2488 }
2489
2490 static int dsa_slave_phy_setup(struct net_device *slave_dev)
2491 {
2492         struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2493         struct device_node *port_dn = dp->dn;
2494         struct dsa_switch *ds = dp->ds;
2495         u32 phy_flags = 0;
2496         int ret;
2497
2498         dp->pl_config.dev = &slave_dev->dev;
2499         dp->pl_config.type = PHYLINK_NETDEV;
2500
2501         /* The get_fixed_state callback takes precedence over polling the
2502          * link GPIO in PHYLINK (see phylink_get_fixed_state).  Only set
2503          * this if the switch provides such a callback.
2504          */
2505         if (ds->ops->phylink_fixed_state) {
2506                 dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
2507                 dp->pl_config.poll_fixed_state = true;
2508         }
2509
2510         ret = dsa_port_phylink_create(dp);
2511         if (ret)
2512                 return ret;
2513
2514         if (ds->ops->get_phy_flags)
2515                 phy_flags = ds->ops->get_phy_flags(ds, dp->index);
2516
2517         ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
2518         if (ret == -ENODEV && ds->slave_mii_bus) {
2519                 /* We could not connect to a designated PHY or SFP, so try to
2520                  * use the switch internal MDIO bus instead
2521                  */
2522                 ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
2523         }
2524         if (ret) {
2525                 netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
2526                            ERR_PTR(ret));
2527                 dsa_port_phylink_destroy(dp);
2528         }
2529
2530         return ret;
2531 }
2532
2533 void dsa_slave_setup_tagger(struct net_device *slave)
2534 {
2535         struct dsa_port *dp = dsa_slave_to_port(slave);
2536         struct net_device *master = dsa_port_to_master(dp);
2537         struct dsa_slave_priv *p = netdev_priv(slave);
2538         const struct dsa_port *cpu_dp = dp->cpu_dp;
2539         const struct dsa_switch *ds = dp->ds;
2540
2541         slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
2542         slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
2543         /* Try to save one extra realloc later in the TX path (in the master)
2544          * by also inheriting the master's needed headroom and tailroom.
2545          * The 8021q driver also does this.
2546          */
2547         slave->needed_headroom += master->needed_headroom;
2548         slave->needed_tailroom += master->needed_tailroom;
2549
2550         p->xmit = cpu_dp->tag_ops->xmit;
2551
2552         slave->features = master->vlan_features | NETIF_F_HW_TC;
2553         slave->hw_features |= NETIF_F_HW_TC;
2554         slave->features |= NETIF_F_LLTX;
2555         if (slave->needed_tailroom)
2556                 slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
2557         if (ds->needs_standalone_vlan_filtering)
2558                 slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2559 }
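
/* Worked example (illustrative numbers): with a tagger that needs 8 bytes
 * of headroom on a master that itself advertises 2, the slave requests 10.
 * An skb allocated against the slave thus already has room for both the
 * DSA tag and the master's own encapsulation, sparing a reallocation in
 * the master's xmit path.
 */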
2560
2561 int dsa_slave_suspend(struct net_device *slave_dev)
2562 {
2563         struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2564
2565         if (!netif_running(slave_dev))
2566                 return 0;
2567
2568         netif_device_detach(slave_dev);
2569
2570         rtnl_lock();
2571         phylink_stop(dp->pl);
2572         rtnl_unlock();
2573
2574         return 0;
2575 }
2576
2577 int dsa_slave_resume(struct net_device *slave_dev)
2578 {
2579         struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2580
2581         if (!netif_running(slave_dev))
2582                 return 0;
2583
2584         netif_device_attach(slave_dev);
2585
2586         rtnl_lock();
2587         phylink_start(dp->pl);
2588         rtnl_unlock();
2589
2590         return 0;
2591 }
2592
2593 int dsa_slave_create(struct dsa_port *port)
2594 {
2595         struct net_device *master = dsa_port_to_master(port);
2596         struct dsa_switch *ds = port->ds;
2597         struct net_device *slave_dev;
2598         struct dsa_slave_priv *p;
2599         const char *name;
2600         int assign_type;
2601         int ret;
2602
2603         if (!ds->num_tx_queues)
2604                 ds->num_tx_queues = 1;
2605
2606         if (port->name) {
2607                 name = port->name;
2608                 assign_type = NET_NAME_PREDICTABLE;
2609         } else {
2610                 name = "eth%d";
2611                 assign_type = NET_NAME_ENUM;
2612         }
2613
2614         slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
2615                                      assign_type, ether_setup,
2616                                      ds->num_tx_queues, 1);
2617         if (slave_dev == NULL)
2618                 return -ENOMEM;
2619
2620         slave_dev->rtnl_link_ops = &dsa_link_ops;
2621         slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
2622 #if IS_ENABLED(CONFIG_DCB)
2623         slave_dev->dcbnl_ops = &dsa_slave_dcbnl_ops;
2624 #endif
2625         if (!is_zero_ether_addr(port->mac))
2626                 eth_hw_addr_set(slave_dev, port->mac);
2627         else
2628                 eth_hw_addr_inherit(slave_dev, master);
2629         slave_dev->priv_flags |= IFF_NO_QUEUE;
2630         if (dsa_switch_supports_uc_filtering(ds))
2631                 slave_dev->priv_flags |= IFF_UNICAST_FLT;
2632         slave_dev->netdev_ops = &dsa_slave_netdev_ops;
2633         if (ds->ops->port_max_mtu)
2634                 slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
2635         SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
2636
2637         SET_NETDEV_DEV(slave_dev, port->ds->dev);
2638         SET_NETDEV_DEVLINK_PORT(slave_dev, &port->devlink_port);
2639         slave_dev->dev.of_node = port->dn;
2640         slave_dev->vlan_features = master->vlan_features;
2641
2642         p = netdev_priv(slave_dev);
2643         slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2644         if (!slave_dev->tstats) {
2645                 free_netdev(slave_dev);
2646                 return -ENOMEM;
2647         }
2648
2649         ret = gro_cells_init(&p->gcells, slave_dev);
2650         if (ret)
2651                 goto out_free;
2652
2653         p->dp = port;
2654         INIT_LIST_HEAD(&p->mall_tc_list);
2655         port->slave = slave_dev;
2656         dsa_slave_setup_tagger(slave_dev);
2657
2658         netif_carrier_off(slave_dev);
2659
2660         ret = dsa_slave_phy_setup(slave_dev);
2661         if (ret) {
2662                 netdev_err(slave_dev,
2663                            "error %d setting up PHY for tree %d, switch %d, port %d\n",
2664                            ret, ds->dst->index, ds->index, port->index);
2665                 goto out_gcells;
2666         }
2667
2668         rtnl_lock();
2669
2670         ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
2671         if (ret && ret != -EOPNOTSUPP)
2672                 dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
2673                          ret, ETH_DATA_LEN, port->index);
2674
2675         ret = register_netdevice(slave_dev);
2676         if (ret) {
2677                 netdev_err(master, "error %d registering interface %s\n",
2678                            ret, slave_dev->name);
2679                 rtnl_unlock();
2680                 goto out_phy;
2681         }
2682
2683         if (IS_ENABLED(CONFIG_DCB)) {
2684                 ret = dsa_slave_dcbnl_init(slave_dev);
2685                 if (ret) {
2686                         netdev_err(slave_dev,
2687                                    "failed to initialize DCB: %pe\n",
2688                                    ERR_PTR(ret));
2689                         rtnl_unlock();
2690                         goto out_unregister;
2691                 }
2692         }
2693
2694         ret = netdev_upper_dev_link(master, slave_dev, NULL);
2695
2696         rtnl_unlock();
2697
2698         if (ret)
2699                 goto out_unregister;
2700
2701         return 0;
2702
2703 out_unregister:
2704         unregister_netdev(slave_dev);
2705 out_phy:
2706         rtnl_lock();
2707         phylink_disconnect_phy(p->dp->pl);
2708         rtnl_unlock();
2709         dsa_port_phylink_destroy(p->dp);
2710 out_gcells:
2711         gro_cells_destroy(&p->gcells);
2712 out_free:
2713         free_percpu(slave_dev->tstats);
2714         free_netdev(slave_dev);
2715         port->slave = NULL;
2716         return ret;
2717 }
2718
2719 void dsa_slave_destroy(struct net_device *slave_dev)
2720 {
2721         struct net_device *master = dsa_slave_to_master(slave_dev);
2722         struct dsa_port *dp = dsa_slave_to_port(slave_dev);
2723         struct dsa_slave_priv *p = netdev_priv(slave_dev);
2724
2725         netif_carrier_off(slave_dev);
2726         rtnl_lock();
2727         netdev_upper_dev_unlink(master, slave_dev);
2728         unregister_netdevice(slave_dev);
2729         phylink_disconnect_phy(dp->pl);
2730         rtnl_unlock();
2731
2732         dsa_port_phylink_destroy(dp);
2733         gro_cells_destroy(&p->gcells);
2734         free_percpu(slave_dev->tstats);
2735         free_netdev(slave_dev);
2736 }
2737
2738 int dsa_slave_change_master(struct net_device *dev, struct net_device *master,
2739                             struct netlink_ext_ack *extack)
2740 {
2741         struct net_device *old_master = dsa_slave_to_master(dev);
2742         struct dsa_port *dp = dsa_slave_to_port(dev);
2743         struct dsa_switch *ds = dp->ds;
2744         struct net_device *upper;
2745         struct list_head *iter;
2746         int err;
2747
2748         if (master == old_master)
2749                 return 0;
2750
2751         if (!ds->ops->port_change_master) {
2752                 NL_SET_ERR_MSG_MOD(extack,
2753                                    "Driver does not support changing DSA master");
2754                 return -EOPNOTSUPP;
2755         }
2756
2757         if (!netdev_uses_dsa(master)) {
2758                 NL_SET_ERR_MSG_MOD(extack,
2759                                    "Interface not eligible as DSA master");
2760                 return -EOPNOTSUPP;
2761         }
2762
2763         netdev_for_each_upper_dev_rcu(master, upper, iter) {
2764                 if (dsa_slave_dev_check(upper))
2765                         continue;
2766                 if (netif_is_bridge_master(upper))
2767                         continue;
2768                 NL_SET_ERR_MSG_MOD(extack, "Cannot join master with unknown uppers");
2769                 return -EOPNOTSUPP;
2770         }
2771
2772         /* Since we allow live-changing the DSA master, and we auto-open the
2773          * DSA master when the user port opens, we need to ensure that the
2774          * new DSA master is open too.
2775          */
2776         if (dev->flags & IFF_UP) {
2777                 err = dev_open(master, extack);
2778                 if (err)
2779                         return err;
2780         }
2781
2782         netdev_upper_dev_unlink(old_master, dev);
2783
2784         err = netdev_upper_dev_link(master, dev, extack);
2785         if (err)
2786                 goto out_revert_old_master_unlink;
2787
2788         err = dsa_port_change_master(dp, master, extack);
2789         if (err)
2790                 goto out_revert_master_link;
2791
2792         /* Update the MTU of the new CPU port through cross-chip notifiers */
2793         err = dsa_slave_change_mtu(dev, dev->mtu);
2794         if (err && err != -EOPNOTSUPP) {
2795                 netdev_warn(dev,
2796                             "nonfatal error updating MTU with new master: %pe\n",
2797                             ERR_PTR(err));
2798         }
2799
2800         /* If the port doesn't have its own MAC address and relies on the DSA
2801          * master's one, inherit it again from the new DSA master.
2802          */
2803         if (is_zero_ether_addr(dp->mac))
2804                 eth_hw_addr_inherit(dev, master);
2805
2806         return 0;
2807
2808 out_revert_master_link:
2809         netdev_upper_dev_unlink(master, dev);
2810 out_revert_old_master_unlink:
2811         netdev_upper_dev_link(old_master, dev, NULL);
2812         return err;
2813 }
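
/* Usage sketch (illustrative; iproute2 syntax may vary by version): a
 * live master change is requested over rtnetlink, e.g.:
 *
 *   ip link set swp0 type dsa master eth1
 *
 * The request lands in dsa_slave_change_master() above via dsa_link_ops,
 * after which swp0's traffic flows through eth1's CPU port.
 */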
2814
2815 bool dsa_slave_dev_check(const struct net_device *dev)
2816 {
2817         return dev->netdev_ops == &dsa_slave_netdev_ops;
2818 }
2819 EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
2820
2821 static int dsa_slave_changeupper(struct net_device *dev,
2822                                  struct netdev_notifier_changeupper_info *info)
2823 {
2824         struct dsa_port *dp = dsa_slave_to_port(dev);
2825         struct netlink_ext_ack *extack;
2826         int err = NOTIFY_DONE;
2827
2828         if (!dsa_slave_dev_check(dev))
2829                 return err;
2830
2831         extack = netdev_notifier_info_to_extack(&info->info);
2832
2833         if (netif_is_bridge_master(info->upper_dev)) {
2834                 if (info->linking) {
2835                         err = dsa_port_bridge_join(dp, info->upper_dev, extack);
2836                         if (!err)
2837                                 dsa_bridge_mtu_normalization(dp);
2838                         if (err == -EOPNOTSUPP) {
2839                                 NL_SET_ERR_MSG_WEAK_MOD(extack,
2840                                                         "Offloading not supported");
2841                                 err = 0;
2842                         }
2843                         err = notifier_from_errno(err);
2844                 } else {
2845                         dsa_port_bridge_leave(dp, info->upper_dev);
2846                         err = NOTIFY_OK;
2847                 }
2848         } else if (netif_is_lag_master(info->upper_dev)) {
2849                 if (info->linking) {
2850                         err = dsa_port_lag_join(dp, info->upper_dev,
2851                                                 info->upper_info, extack);
2852                         if (err == -EOPNOTSUPP) {
2853                                 NL_SET_ERR_MSG_WEAK_MOD(extack,
2854                                                         "Offloading not supported");
2855                                 err = 0;
2856                         }
2857                         err = notifier_from_errno(err);
2858                 } else {
2859                         dsa_port_lag_leave(dp, info->upper_dev);
2860                         err = NOTIFY_OK;
2861                 }
2862         } else if (is_hsr_master(info->upper_dev)) {
2863                 if (info->linking) {
2864                         err = dsa_port_hsr_join(dp, info->upper_dev);
2865                         if (err == -EOPNOTSUPP) {
2866                                 NL_SET_ERR_MSG_WEAK_MOD(extack,
2867                                                         "Offloading not supported");
2868                                 err = 0;
2869                         }
2870                         err = notifier_from_errno(err);
2871                 } else {
2872                         dsa_port_hsr_leave(dp, info->upper_dev);
2873                         err = NOTIFY_OK;
2874                 }
2875         }
2876
2877         return err;
2878 }
2879
2880 static int dsa_slave_prechangeupper(struct net_device *dev,
2881                                     struct netdev_notifier_changeupper_info *info)
2882 {
2883         struct dsa_port *dp = dsa_slave_to_port(dev);
2884
2885         if (!dsa_slave_dev_check(dev))
2886                 return NOTIFY_DONE;
2887
2888         if (netif_is_bridge_master(info->upper_dev) && !info->linking)
2889                 dsa_port_pre_bridge_leave(dp, info->upper_dev);
2890         else if (netif_is_lag_master(info->upper_dev) && !info->linking)
2891                 dsa_port_pre_lag_leave(dp, info->upper_dev);
2892         /* dsa_port_pre_hsr_leave is not yet necessary since hsr devices
2893          * cannot be meaningfully enslaved to a bridge yet
2894          */
2895
2896         return NOTIFY_DONE;
2897 }
2898
2899 static int
2900 dsa_slave_lag_changeupper(struct net_device *dev,
2901                           struct netdev_notifier_changeupper_info *info)
2902 {
2903         struct net_device *lower;
2904         struct list_head *iter;
2905         int err = NOTIFY_DONE;
2906         struct dsa_port *dp;
2907
2908         if (!netif_is_lag_master(dev))
2909                 return err;
2910
2911         netdev_for_each_lower_dev(dev, lower, iter) {
2912                 if (!dsa_slave_dev_check(lower))
2913                         continue;
2914
2915                 dp = dsa_slave_to_port(lower);
2916                 if (!dp->lag)
2917                         /* Software LAG */
2918                         continue;
2919
2920                 err = dsa_slave_changeupper(lower, info);
2921                 if (notifier_to_errno(err))
2922                         break;
2923         }
2924
2925         return err;
2926 }
2927
2928 /* Same as dsa_slave_lag_changeupper() except that it calls
2929  * dsa_slave_prechangeupper()
2930  */
2931 static int
2932 dsa_slave_lag_prechangeupper(struct net_device *dev,
2933                              struct netdev_notifier_changeupper_info *info)
2934 {
2935         struct net_device *lower;
2936         struct list_head *iter;
2937         int err = NOTIFY_DONE;
2938         struct dsa_port *dp;
2939
2940         if (!netif_is_lag_master(dev))
2941                 return err;
2942
2943         netdev_for_each_lower_dev(dev, lower, iter) {
2944                 if (!dsa_slave_dev_check(lower))
2945                         continue;
2946
2947                 dp = dsa_slave_to_port(lower);
2948                 if (!dp->lag)
2949                         /* Software LAG */
2950                         continue;
2951
2952                 err = dsa_slave_prechangeupper(lower, info);
2953                 if (notifier_to_errno(err))
2954                         break;
2955         }
2956
2957         return err;
2958 }
2959
2960 static int
2961 dsa_prevent_bridging_8021q_upper(struct net_device *dev,
2962                                  struct netdev_notifier_changeupper_info *info)
2963 {
2964         struct netlink_ext_ack *ext_ack;
2965         struct net_device *slave, *br;
2966         struct dsa_port *dp;
2967
2968         ext_ack = netdev_notifier_info_to_extack(&info->info);
2969
2970         if (!is_vlan_dev(dev))
2971                 return NOTIFY_DONE;
2972
2973         slave = vlan_dev_real_dev(dev);
2974         if (!dsa_slave_dev_check(slave))
2975                 return NOTIFY_DONE;
2976
2977         dp = dsa_slave_to_port(slave);
2978         br = dsa_port_bridge_dev_get(dp);
2979         if (!br)
2980                 return NOTIFY_DONE;
2981
2982         /* Deny enslaving a VLAN device into a VLAN-aware bridge */
2983         if (br_vlan_enabled(br) &&
2984             netif_is_bridge_master(info->upper_dev) && info->linking) {
2985                 NL_SET_ERR_MSG_MOD(ext_ack,
2986                                    "Cannot enslave VLAN device into VLAN aware bridge");
2987                 return notifier_from_errno(-EINVAL);
2988         }
2989
2990         return NOTIFY_DONE;
2991 }
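
/* Usage sketch (illustrative): the check above rejects e.g.
 *
 *   ip link set swp0 master br0        # br0 has vlan_filtering=1
 *   ip link add link swp0 name swp0.200 type vlan id 200
 *   ip link set swp0.200 master br1    # denied with -EINVAL
 *
 * since the VLAN uppers of a port that already offloads a VLAN-aware
 * bridge cannot themselves be bridged.
 */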
2992
2993 static int
2994 dsa_slave_check_8021q_upper(struct net_device *dev,
2995                             struct netdev_notifier_changeupper_info *info)
2996 {
2997         struct dsa_port *dp = dsa_slave_to_port(dev);
2998         struct net_device *br = dsa_port_bridge_dev_get(dp);
2999         struct bridge_vlan_info br_info;
3000         struct netlink_ext_ack *extack;
3001         int err = NOTIFY_DONE;
3002         u16 vid;
3003
3004         if (!br || !br_vlan_enabled(br))
3005                 return NOTIFY_DONE;
3006
3007         extack = netdev_notifier_info_to_extack(&info->info);
3008         vid = vlan_dev_vlan_id(info->upper_dev);
3009
3010         /* br_vlan_get_info() returns -EINVAL if the device is not a bridge
3011          * and -ENOENT if the VID is not found on it. A return value of 0
3012          * means the VID exists on the bridge, which is a failure for us here.
3013          */
3014         err = br_vlan_get_info(br, vid, &br_info);
3015         if (err == 0) {
3016                 NL_SET_ERR_MSG_MOD(extack,
3017                                    "This VLAN is already configured by the bridge");
3018                 return notifier_from_errno(-EBUSY);
3019         }
3020
3021         return NOTIFY_DONE;
3022 }
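
/* Usage sketch (illustrative): the overlap check above turns e.g.
 *
 *   ip link set swp0 master br0        # br0 has vlan_filtering=1
 *   bridge vlan add dev swp0 vid 100
 *   ip link add link swp0 name swp0.100 type vlan id 100   # -EBUSY
 *
 * into a hard failure, because VID 100 on this port is already owned
 * by the bridge.
 */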
3023
3024 static int
3025 dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
3026                                       struct netdev_notifier_changeupper_info *info)
3027 {
3028         struct dsa_switch *ds;
3029         struct dsa_port *dp;
3030         int err;
3031
3032         if (!dsa_slave_dev_check(dev))
3033                 return dsa_prevent_bridging_8021q_upper(dev, info);
3034
3035         dp = dsa_slave_to_port(dev);
3036         ds = dp->ds;
3037
3038         if (ds->ops->port_prechangeupper) {
3039                 err = ds->ops->port_prechangeupper(ds, dp->index, info);
3040                 if (err)
3041                         return notifier_from_errno(err);
3042         }
3043
3044         if (is_vlan_dev(info->upper_dev))
3045                 return dsa_slave_check_8021q_upper(dev, info);
3046
3047         return NOTIFY_DONE;
3048 }
3049
3050 /* To be eligible as a DSA master, all of a LAG's lower interfaces must
3051  * themselves be eligible DSA masters. Additionally, all LAG slaves must be
3052  * DSA masters of switches in the same switch tree.
3053  */
3054 static int dsa_lag_master_validate(struct net_device *lag_dev,
3055                                    struct netlink_ext_ack *extack)
3056 {
3057         struct net_device *lower1, *lower2;
3058         struct list_head *iter1, *iter2;
3059
3060         netdev_for_each_lower_dev(lag_dev, lower1, iter1) {
3061                 netdev_for_each_lower_dev(lag_dev, lower2, iter2) {
3062                         if (!netdev_uses_dsa(lower1) ||
3063                             !netdev_uses_dsa(lower2)) {
3064                                 NL_SET_ERR_MSG_MOD(extack,
3065                                                    "All LAG ports must be eligible as DSA masters");
3066                                 return notifier_from_errno(-EINVAL);
3067                         }
3068
3069                         if (lower1 == lower2)
3070                                 continue;
3071
3072                         if (!dsa_port_tree_same(lower1->dsa_ptr,
3073                                                 lower2->dsa_ptr)) {
3074                                 NL_SET_ERR_MSG_MOD(extack,
3075                                                    "LAG contains DSA masters of disjoint switch trees");
3076                                 return notifier_from_errno(-EINVAL);
3077                         }
3078                 }
3079         }
3080
3081         return NOTIFY_DONE;
3082 }
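
/* Illustrative sketch (hypothetical helper, not used by DSA): the same
 * netdev_for_each_lower_dev() walk that dsa_lag_master_validate() runs
 * pairwise above, shown in isolation. Like the notifier context here,
 * it must run with the RTNL held.
 */
static unsigned int example_count_lag_lowers(struct net_device *lag_dev)
{
        struct net_device *lower;
        struct list_head *iter;
        unsigned int n = 0;

        netdev_for_each_lower_dev(lag_dev, lower, iter)
                n++;    /* one iteration per direct lower device */

        return n;
}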
3083
3084 static int
3085 dsa_master_prechangeupper_sanity_check(struct net_device *master,
3086                                        struct netdev_notifier_changeupper_info *info)
3087 {
3088         struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
3089
3090         if (!netdev_uses_dsa(master))
3091                 return NOTIFY_DONE;
3092
3093         if (!info->linking)
3094                 return NOTIFY_DONE;
3095
3096         /* Allow DSA switch uppers */
3097         if (dsa_slave_dev_check(info->upper_dev))
3098                 return NOTIFY_DONE;
3099
3100         /* Allow bridge uppers of DSA masters, subject to further
3101          * restrictions in dsa_bridge_prechangelower_sanity_check()
3102          */
3103         if (netif_is_bridge_master(info->upper_dev))
3104                 return NOTIFY_DONE;
3105
3106         /* Allow LAG uppers, subject to further restrictions in
3107          * dsa_lag_master_prechangelower_sanity_check()
3108          */
3109         if (netif_is_lag_master(info->upper_dev))
3110                 return dsa_lag_master_validate(info->upper_dev, extack);
3111
3112         NL_SET_ERR_MSG_MOD(extack,
3113                            "DSA master cannot join unknown upper interfaces");
3114         return notifier_from_errno(-EBUSY);
3115 }
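
/* Aside, for illustration only: notifier_from_errno() encodes a
 * negative errno into a notifier chain return value, and
 * notifier_to_errno() recovers it, which is how the -EBUSY above
 * reaches the code that invoked the notifier chain:
 *
 *	int ret = notifier_from_errno(-EBUSY);
 *	WARN_ON(notifier_to_errno(ret) != -EBUSY);
 */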
3116
3117 static int
3118 dsa_lag_master_prechangelower_sanity_check(struct net_device *dev,
3119                                            struct netdev_notifier_changeupper_info *info)
3120 {
3121         struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
3122         struct net_device *lag_dev = info->upper_dev;
3123         struct net_device *lower;
3124         struct list_head *iter;
3125
3126         if (!netdev_uses_dsa(lag_dev) || !netif_is_lag_master(lag_dev))
3127                 return NOTIFY_DONE;
3128
3129         if (!info->linking)
3130                 return NOTIFY_DONE;
3131
3132         if (!netdev_uses_dsa(dev)) {
3133                 NL_SET_ERR_MSG(extack,
3134                                "Only DSA masters can join a LAG DSA master");
3135                 return notifier_from_errno(-EINVAL);
3136         }
3137
3138         netdev_for_each_lower_dev(lag_dev, lower, iter) {
3139                 if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) {
3140                         NL_SET_ERR_MSG(extack,
3141                                        "Interface is DSA master for a different switch tree than this LAG");
3142                         return notifier_from_errno(-EINVAL);
3143                 }
3144
3145                 break;
3146         }
3147
3148         return NOTIFY_DONE;
3149 }
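
/* Usage note (illustrative iproute2 commands, with invented interface
 * names): the checks above correspond to aggregating two DSA masters
 * of the same switch tree, eth0 and eth1, under one LAG:
 *
 *	ip link add bond0 type bond mode 802.3ad
 *	ip link set eth0 down && ip link set eth0 master bond0
 *	ip link set eth1 down && ip link set eth1 master bond0
 */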
3150
3151 /* Don't allow bridging of DSA masters, since the bridge layer rx_handler
3152  * prevents the DSA fake ethertype handler from being invoked, so we don't
3153  * get the chance to strip off and parse the DSA switch tag protocol header
3154  * (the bridge layer just returns RX_HANDLER_CONSUMED, stopping RX
3155  * processing for these frames).
3156  * The only case where that would not be an issue is when bridging can already
3157  * be offloaded, such as when the DSA master is itself a DSA or plain switchdev
3158  * port, and is bridged only with other ports from the same hardware device.
3159  */
3160 static int
3161 dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
3162                                        struct netdev_notifier_changeupper_info *info)
3163 {
3164         struct net_device *br = info->upper_dev;
3165         struct netlink_ext_ack *extack;
3166         struct net_device *lower;
3167         struct list_head *iter;
3168
3169         if (!netif_is_bridge_master(br))
3170                 return NOTIFY_DONE;
3171
3172         if (!info->linking)
3173                 return NOTIFY_DONE;
3174
3175         extack = netdev_notifier_info_to_extack(&info->info);
3176
3177         netdev_for_each_lower_dev(br, lower, iter) {
3178                 if (!netdev_uses_dsa(new_lower) && !netdev_uses_dsa(lower))
3179                         continue;
3180
3181                 if (!netdev_port_same_parent_id(lower, new_lower)) {
3182                         NL_SET_ERR_MSG(extack,
3183                                        "Cannot do software bridging with a DSA master");
3184                         return notifier_from_errno(-EINVAL);
3185                 }
3186         }
3187
3188         return NOTIFY_DONE;
3189 }
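
/* Aside: netdev_port_same_parent_id() compares the physical port
 * parent IDs reported by two netdevs, so the loop above effectively
 * asks whether the new lower and every existing DSA master lower sit
 * on the same piece of hardware. Illustrative call, with invented
 * names:
 *
 *	bool same_hw = netdev_port_same_parent_id(swp0, swp1);
 */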
3190
3191 static void dsa_tree_migrate_ports_from_lag_master(struct dsa_switch_tree *dst,
3192                                                    struct net_device *lag_dev)
3193 {
3194         struct net_device *new_master = dsa_tree_find_first_master(dst);
3195         struct dsa_port *dp;
3196         int err;
3197
3198         dsa_tree_for_each_user_port(dp, dst) {
3199                 if (dsa_port_to_master(dp) != lag_dev)
3200                         continue;
3201
3202                 err = dsa_slave_change_master(dp->slave, new_master, NULL);
3203                 if (err) {
3204                         netdev_err(dp->slave,
3205                                    "failed to restore master to %s: %pe\n",
3206                                    new_master->name, ERR_PTR(err));
3207                 }
3208         }
3209 }
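
/* Aside: the "%pe" format used above prints an ERR_PTR() as a symbolic
 * errno name when CONFIG_SYMBOLIC_ERRNAME is enabled (and as a plain
 * number otherwise). Illustration, with an invented message:
 *
 *	netdev_err(dev, "failed: %pe\n", ERR_PTR(-ENOMEM));
 *	prints "failed: -ENOMEM"
 */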
3210
3211 static int dsa_master_lag_join(struct net_device *master,
3212                                struct net_device *lag_dev,
3213                                struct netdev_lag_upper_info *uinfo,
3214                                struct netlink_ext_ack *extack)
3215 {
3216         struct dsa_port *cpu_dp = master->dsa_ptr;
3217         struct dsa_switch_tree *dst = cpu_dp->dst;
3218         struct dsa_port *dp;
3219         int err;
3220
3221         err = dsa_master_lag_setup(lag_dev, cpu_dp, uinfo, extack);
3222         if (err)
3223                 return err;
3224
3225         dsa_tree_for_each_user_port(dp, dst) {
3226                 if (dsa_port_to_master(dp) != master)
3227                         continue;
3228
3229                 err = dsa_slave_change_master(dp->slave, lag_dev, extack);
3230                 if (err)
3231                         goto restore;
3232         }
3233
3234         return 0;
3235
3236 restore:
3237         dsa_tree_for_each_user_port_continue_reverse(dp, dst) {
3238                 if (dsa_port_to_master(dp) != lag_dev)
3239                         continue;
3240
3241                 err = dsa_slave_change_master(dp->slave, master, NULL);
3242                 if (err) {
3243                         netdev_err(dp->slave,
3244                                    "failed to restore master to %s: %pe\n",
3245                                    master->name, ERR_PTR(err));
3246                 }
3247         }
3248
3249         dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
3250
3251         return err;
3252 }
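
/* Aside: the restore: path above is the common kernel unwind idiom -
 * walk back over the user ports already changed (via the
 * _continue_reverse iterator) and revert them, then undo the LAG setup
 * itself. Generic shape of the idiom, with invented names:
 *
 *	for (i = 0; i < n; i++) {
 *		err = change(i);
 *		if (err)
 *			goto restore;
 *	}
 *	return 0;
 * restore:
 *	while (--i >= 0)
 *		revert(i);
 *	return err;
 */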
3253
3254 static void dsa_master_lag_leave(struct net_device *master,
3255                                  struct net_device *lag_dev)
3256 {
3257         struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr;
3258         struct dsa_switch_tree *dst = cpu_dp->dst;
3259         struct dsa_port *new_cpu_dp = NULL;
3260         struct net_device *lower;
3261         struct list_head *iter;
3262
3263         netdev_for_each_lower_dev(lag_dev, lower, iter) {
3264                 if (netdev_uses_dsa(lower)) {
3265                         new_cpu_dp = lower->dsa_ptr;
3266                         break;
3267                 }
3268         }
3269
3270         if (new_cpu_dp) {
3271                 /* Update the CPU port of the user ports still under the LAG
3272                  * so that dsa_port_to_master() continues to work properly
3273                  */
3274                 dsa_tree_for_each_user_port(dp, dst)
3275                         if (dsa_port_to_master(dp) == lag_dev)
3276                                 dp->cpu_dp = new_cpu_dp;
3277
3278                 /* Update the index of the virtual CPU port to match the lowest
3279                  * physical CPU port
3280                  */
3281                 lag_dev->dsa_ptr = new_cpu_dp;
3282                 wmb();
3283         } else {
3284                 /* If the LAG DSA master has no ports left, migrate back all
3285                  * user ports to the first physical CPU port
3286                  */
3287                 dsa_tree_migrate_ports_from_lag_master(dst, lag_dev);
3288         }
3289
3290         /* This DSA master has left its LAG in any case, so let
3291          * the CPU port leave the hardware LAG as well
3292          */
3293         dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
3294 }
3295
3296 static int dsa_master_changeupper(struct net_device *dev,
3297                                   struct netdev_notifier_changeupper_info *info)
3298 {
3299         struct netlink_ext_ack *extack;
3300         int err = NOTIFY_DONE;
3301
3302         if (!netdev_uses_dsa(dev))
3303                 return err;
3304
3305         extack = netdev_notifier_info_to_extack(&info->info);
3306
3307         if (netif_is_lag_master(info->upper_dev)) {
3308                 if (info->linking) {
3309                         err = dsa_master_lag_join(dev, info->upper_dev,
3310                                                   info->upper_info, extack);
3311                         err = notifier_from_errno(err);
3312                 } else {
3313                         dsa_master_lag_leave(dev, info->upper_dev);
3314                         err = NOTIFY_OK;
3315                 }
3316         }
3317
3318         return err;
3319 }
3320
3321 static int dsa_slave_netdevice_event(struct notifier_block *nb,
3322                                      unsigned long event, void *ptr)
3323 {
3324         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3325
3326         switch (event) {
3327         case NETDEV_PRECHANGEUPPER: {
3328                 struct netdev_notifier_changeupper_info *info = ptr;
3329                 int err;
3330
3331                 err = dsa_slave_prechangeupper_sanity_check(dev, info);
3332                 if (notifier_to_errno(err))
3333                         return err;
3334
3335                 err = dsa_master_prechangeupper_sanity_check(dev, info);
3336                 if (notifier_to_errno(err))
3337                         return err;
3338
3339                 err = dsa_lag_master_prechangelower_sanity_check(dev, info);
3340                 if (notifier_to_errno(err))
3341                         return err;
3342
3343                 err = dsa_bridge_prechangelower_sanity_check(dev, info);
3344                 if (notifier_to_errno(err))
3345                         return err;
3346
3347                 err = dsa_slave_prechangeupper(dev, ptr);
3348                 if (notifier_to_errno(err))
3349                         return err;
3350
3351                 err = dsa_slave_lag_prechangeupper(dev, ptr);
3352                 if (notifier_to_errno(err))
3353                         return err;
3354
3355                 break;
3356         }
3357         case NETDEV_CHANGEUPPER: {
3358                 int err;
3359
3360                 err = dsa_slave_changeupper(dev, ptr);
3361                 if (notifier_to_errno(err))
3362                         return err;
3363
3364                 err = dsa_slave_lag_changeupper(dev, ptr);
3365                 if (notifier_to_errno(err))
3366                         return err;
3367
3368                 err = dsa_master_changeupper(dev, ptr);
3369                 if (notifier_to_errno(err))
3370                         return err;
3371
3372                 break;
3373         }
3374         case NETDEV_CHANGELOWERSTATE: {
3375                 struct netdev_notifier_changelowerstate_info *info = ptr;
3376                 struct dsa_port *dp;
3377                 int err = 0;
3378
3379                 if (dsa_slave_dev_check(dev)) {
3380                         dp = dsa_slave_to_port(dev);
3381
3382                         err = dsa_port_lag_change(dp, info->lower_state_info);
3383                 }
3384
3385                 /* Mirror LAG port events on DSA masters that are in
3386                  * a LAG towards their respective switch CPU ports
3387                  */
3388                 if (netdev_uses_dsa(dev)) {
3389                         dp = dev->dsa_ptr;
3390
3391                         err = dsa_port_lag_change(dp, info->lower_state_info);
3392                 }
3393
3394                 return notifier_from_errno(err);
3395         }
3396         case NETDEV_CHANGE:
3397         case NETDEV_UP: {
3398                 /* Track state of master port.
3399                  * A DSA driver may require the master port (and indirectly
3400                  * the tagger) to be available for some special operations.
3401                  */
3402                 if (netdev_uses_dsa(dev)) {
3403                         struct dsa_port *cpu_dp = dev->dsa_ptr;
3404                         struct dsa_switch_tree *dst = cpu_dp->ds->dst;
3405
3406                         /* Track when the master port is UP */
3407                         dsa_tree_master_oper_state_change(dst, dev,
3408                                                           netif_oper_up(dev));
3409
3410                         /* Track when the master port is ready and can accept
3411                          * packets.
3412                          * The NETDEV_UP event is not enough to flag a port as
3413                          * ready. We also have to wait for linkwatch_do_dev to
3414                          * call dev_activate and emit a NETDEV_CHANGE event.
3415                          * We check whether a master port is ready by checking
3416                          * that the dev has a qdisc assigned and it is not noop.
3417                          */
3418                         dsa_tree_master_admin_state_change(dst, dev,
3419                                                            !qdisc_tx_is_noop(dev));
3420
3421                         return NOTIFY_OK;
3422                 }
3423
3424                 return NOTIFY_DONE;
3425         }
3426         case NETDEV_GOING_DOWN: {
3427                 struct dsa_port *dp, *cpu_dp;
3428                 struct dsa_switch_tree *dst;
3429                 LIST_HEAD(close_list);
3430
3431                 if (!netdev_uses_dsa(dev))
3432                         return NOTIFY_DONE;
3433
3434                 cpu_dp = dev->dsa_ptr;
3435                 dst = cpu_dp->ds->dst;
3436
3437                 dsa_tree_master_admin_state_change(dst, dev, false);
3438
3439                 list_for_each_entry(dp, &dst->ports, list) {
3440                         if (!dsa_port_is_user(dp))
3441                                 continue;
3442
3443                         if (dp->cpu_dp != cpu_dp)
3444                                 continue;
3445
3446                         list_add(&dp->slave->close_list, &close_list);
3447                 }
3448
3449                 dev_close_many(&close_list, true);
3450
3451                 return NOTIFY_OK;
3452         }
3453         default:
3454                 break;
3455         }
3456
3457         return NOTIFY_DONE;
3458 }
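
/* Illustrative sketch: a minimal, self-contained hypothetical module
 * (not part of DSA) registering a netdevice notifier and reacting to
 * NETDEV_CHANGEUPPER, mirroring how dsa_slave_nb hooks in below.
 */
#include <linux/module.h>
#include <linux/netdevice.h>

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        if (event == NETDEV_CHANGEUPPER)
                netdev_info(dev, "upper device changed\n");

        return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
        .notifier_call = example_netdev_event,
};

static int __init example_init(void)
{
        return register_netdevice_notifier(&example_nb);
}

static void __exit example_exit(void)
{
        unregister_netdevice_notifier(&example_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");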
3459
3460 static void
3461 dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
3462 {
3463         struct switchdev_notifier_fdb_info info = {};
3464
3465         info.addr = switchdev_work->addr;
3466         info.vid = switchdev_work->vid;
3467         info.offloaded = true;
3468         call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
3469                                  switchdev_work->orig_dev, &info.info, NULL);
3470 }
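
/* Aside: raising SWITCHDEV_FDB_OFFLOADED with info.offloaded set tells
 * the bridge that this FDB entry is now programmed in hardware. With
 * current iproute2 (an assumption about tooling, for illustration),
 * such entries carry the "offload" flag:
 *
 *	bridge fdb show dev swp0
 *	de:ad:be:ef:00:01 dev swp0 master br0 offload
 */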
3471
3472 static void dsa_slave_switchdev_event_work(struct work_struct *work)
3473 {
3474         struct dsa_switchdev_event_work *switchdev_work =
3475                 container_of(work, struct dsa_switchdev_event_work, work);
3476         const unsigned char *addr = switchdev_work->addr;
3477         struct net_device *dev = switchdev_work->dev;
3478         u16 vid = switchdev_work->vid;
3479         struct dsa_switch *ds;
3480         struct dsa_port *dp;
3481         int err;
3482
3483         dp = dsa_slave_to_port(dev);
3484         ds = dp->ds;
3485
3486         switch (switchdev_work->event) {
3487         case SWITCHDEV_FDB_ADD_TO_DEVICE:
3488                 if (switchdev_work->host_addr)
3489                         err = dsa_port_bridge_host_fdb_add(dp, addr, vid);
3490                 else if (dp->lag)
3491                         err = dsa_port_lag_fdb_add(dp, addr, vid);
3492                 else
3493                         err = dsa_port_fdb_add(dp, addr, vid);
3494                 if (err) {
3495                         dev_err(ds->dev,
3496                                 "port %d failed to add %pM vid %d to fdb: %d\n",
3497                                 dp->index, addr, vid, err);
3498                         break;
3499                 }
3500                 dsa_fdb_offload_notify(switchdev_work);
3501                 break;
3502
3503         case SWITCHDEV_FDB_DEL_TO_DEVICE:
3504                 if (switchdev_work->host_addr)
3505                         err = dsa_port_bridge_host_fdb_del(dp, addr, vid);
3506                 else if (dp->lag)
3507                         err = dsa_port_lag_fdb_del(dp, addr, vid);
3508                 else
3509                         err = dsa_port_fdb_del(dp, addr, vid);
3510                 if (err) {
3511                         dev_err(ds->dev,
3512                                 "port %d failed to delete %pM vid %d from fdb: %d\n",
3513                                 dp->index, addr, vid, err);
3514                 }
3515
3516                 break;
3517         }
3518
3519         kfree(switchdev_work);
3520 }
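
/* Aside: container_of() above recovers the outer work item from its
 * embedded work_struct. Generic shape of the pattern, with a
 * hypothetical type that is not part of DSA:
 *
 *	struct my_work {
 *		struct work_struct work;
 *		int payload;
 *	};
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_work *w =
 *			container_of(work, struct my_work, work);
 *
 *		pr_info("payload %d\n", w->payload);
 *		kfree(w);
 *	}
 */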
3521
3522 static bool dsa_foreign_dev_check(const struct net_device *dev,
3523                                   const struct net_device *foreign_dev)
3524 {
3525         const struct dsa_port *dp = dsa_slave_to_port(dev);
3526         struct dsa_switch_tree *dst = dp->ds->dst;
3527
3528         if (netif_is_bridge_master(foreign_dev))
3529                 return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);
3530
3531         if (netif_is_bridge_port(foreign_dev))
3532                 return !dsa_tree_offloads_bridge_port(dst, foreign_dev);
3533
3534         /* Everything else is foreign */
3535         return true;
3536 }
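
/* Example with invented names: if DSA port swp0 is bridged with a
 * plain NIC eth2, then dsa_foreign_dev_check(swp0, eth2) is true, and
 * FDB entries learned on eth2 are installed as host addresses on the
 * CPU port so that traffic towards them is steered to software.
 */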
3537
3538 static int dsa_slave_fdb_event(struct net_device *dev,
3539                                struct net_device *orig_dev,
3540                                unsigned long event, const void *ctx,
3541                                const struct switchdev_notifier_fdb_info *fdb_info)
3542 {
3543         struct dsa_switchdev_event_work *switchdev_work;
3544         struct dsa_port *dp = dsa_slave_to_port(dev);
3545         bool host_addr = fdb_info->is_local;
3546         struct dsa_switch *ds = dp->ds;
3547
3548         if (ctx && ctx != dp)
3549                 return 0;
3550
3551         if (!dp->bridge)
3552                 return 0;
3553
3554         if (switchdev_fdb_is_dynamically_learned(fdb_info)) {
3555                 if (dsa_port_offloads_bridge_port(dp, orig_dev))
3556                         return 0;
3557
3558                 /* FDB entries learned by the software bridge or by foreign
3559                  * bridge ports should be installed as host addresses only if
3560                  * the driver requests assisted learning.
3561                  */
3562                 if (!ds->assisted_learning_on_cpu_port)
3563                         return 0;
3564         }
3565
3566         /* Also treat FDB entries on foreign interfaces bridged with us as host
3567          * addresses.
3568          */
3569         if (dsa_foreign_dev_check(dev, orig_dev))
3570                 host_addr = true;
3571
3572         /* Check early that we're not doing work in vain.
3573          * Host addresses on LAG ports still require regular FDB ops,
3574          * since the CPU port isn't in a LAG.
3575          */
3576         if (dp->lag && !host_addr) {
3577                 if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del)
3578                         return -EOPNOTSUPP;
3579         } else {
3580                 if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del)
3581                         return -EOPNOTSUPP;
3582         }
3583
3584         switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
3585         if (!switchdev_work)
3586                 return -ENOMEM;
3587
3588         netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n",
3589                    event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting",
3590                    orig_dev->name, fdb_info->addr, fdb_info->vid,
3591                    host_addr ? " as host address" : "");
3592
3593         INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work);
3594         switchdev_work->event = event;
3595         switchdev_work->dev = dev;
3596         switchdev_work->orig_dev = orig_dev;
3597
3598         ether_addr_copy(switchdev_work->addr, fdb_info->addr);
3599         switchdev_work->vid = fdb_info->vid;
3600         switchdev_work->host_addr = host_addr;
3601
3602         dsa_schedule_work(&switchdev_work->work);
3603
3604         return 0;
3605 }
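
/* Aside: dsa_slave_fdb_event() runs from the atomic switchdev notifier
 * chain (see the rcu_read_lock() note below), hence the GFP_ATOMIC
 * allocation above, with the sleepable FDB programming deferred to
 * dsa_slave_switchdev_event_work() via dsa_schedule_work().
 */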
3606
3607 /* Called under rcu_read_lock() */
3608 static int dsa_slave_switchdev_event(struct notifier_block *unused,
3609                                      unsigned long event, void *ptr)
3610 {
3611         struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3612         int err;
3613
3614         switch (event) {
3615         case SWITCHDEV_PORT_ATTR_SET:
3616                 err = switchdev_handle_port_attr_set(dev, ptr,
3617                                                      dsa_slave_dev_check,
3618                                                      dsa_slave_port_attr_set);
3619                 return notifier_from_errno(err);
3620         case SWITCHDEV_FDB_ADD_TO_DEVICE:
3621         case SWITCHDEV_FDB_DEL_TO_DEVICE:
3622                 err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
3623                                                            dsa_slave_dev_check,
3624                                                            dsa_foreign_dev_check,
3625                                                            dsa_slave_fdb_event);
3626                 return notifier_from_errno(err);
3627         default:
3628                 return NOTIFY_DONE;
3629         }
3630
3631         return NOTIFY_OK;
3632 }
3633
3634 static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
3635                                               unsigned long event, void *ptr)
3636 {
3637         struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3638         int err;
3639
3640         switch (event) {
3641         case SWITCHDEV_PORT_OBJ_ADD:
3642                 err = switchdev_handle_port_obj_add_foreign(dev, ptr,
3643                                                             dsa_slave_dev_check,
3644                                                             dsa_foreign_dev_check,
3645                                                             dsa_slave_port_obj_add);
3646                 return notifier_from_errno(err);
3647         case SWITCHDEV_PORT_OBJ_DEL:
3648                 err = switchdev_handle_port_obj_del_foreign(dev, ptr,
3649                                                             dsa_slave_dev_check,
3650                                                             dsa_foreign_dev_check,
3651                                                             dsa_slave_port_obj_del);
3652                 return notifier_from_errno(err);
3653         case SWITCHDEV_PORT_ATTR_SET:
3654                 err = switchdev_handle_port_attr_set(dev, ptr,
3655                                                      dsa_slave_dev_check,
3656                                                      dsa_slave_port_attr_set);
3657                 return notifier_from_errno(err);
3658         }
3659
3660         return NOTIFY_DONE;
3661 }
3662
3663 static struct notifier_block dsa_slave_nb __read_mostly = {
3664         .notifier_call  = dsa_slave_netdevice_event,
3665 };
3666
3667 struct notifier_block dsa_slave_switchdev_notifier = {
3668         .notifier_call = dsa_slave_switchdev_event,
3669 };
3670
3671 struct notifier_block dsa_slave_switchdev_blocking_notifier = {
3672         .notifier_call = dsa_slave_switchdev_blocking_event,
3673 };
3674
3675 int dsa_slave_register_notifier(void)
3676 {
3677         struct notifier_block *nb;
3678         int err;
3679
3680         err = register_netdevice_notifier(&dsa_slave_nb);
3681         if (err)
3682                 return err;
3683
3684         err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
3685         if (err)
3686                 goto err_switchdev_nb;
3687
3688         nb = &dsa_slave_switchdev_blocking_notifier;
3689         err = register_switchdev_blocking_notifier(nb);
3690         if (err)
3691                 goto err_switchdev_blocking_nb;
3692
3693         return 0;
3694
3695 err_switchdev_blocking_nb:
3696         unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
3697 err_switchdev_nb:
3698         unregister_netdevice_notifier(&dsa_slave_nb);
3699         return err;
3700 }
3701
3702 void dsa_slave_unregister_notifier(void)
3703 {
3704         struct notifier_block *nb;
3705         int err;
3706
3707         nb = &dsa_slave_switchdev_blocking_notifier;
3708         err = unregister_switchdev_blocking_notifier(nb);
3709         if (err)
3710                 pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
3711
3712         err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
3713         if (err)
3714                 pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
3715
3716         err = unregister_netdevice_notifier(&dsa_slave_nb);
3717         if (err)
3718                 pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
3719 }