1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/dsa/slave.c - Slave device handling
4  * Copyright (c) 2008-2009 Marvell Semiconductor
5  */
6
7 #include <linux/list.h>
8 #include <linux/etherdevice.h>
9 #include <linux/netdevice.h>
10 #include <linux/phy.h>
11 #include <linux/phy_fixed.h>
12 #include <linux/phylink.h>
13 #include <linux/of_net.h>
14 #include <linux/of_mdio.h>
15 #include <linux/mdio.h>
16 #include <net/rtnetlink.h>
17 #include <net/pkt_cls.h>
18 #include <net/tc_act/tc_mirred.h>
19 #include <linux/if_bridge.h>
20 #include <linux/netpoll.h>
21 #include <linux/ptp_classify.h>
22
23 #include "dsa_priv.h"
24
25 static bool dsa_slave_dev_check(const struct net_device *dev);
26
27 /* slave mii_bus handling ***************************************************/
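/* MDIO accessors for the slave MII bus: accesses to PHY addresses covered by
 * ds->phys_mii_mask are forwarded to the switch driver; reads of any other
 * address return 0xffff (no device) and writes to it are silently ignored.
 */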
28 static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
29 {
30         struct dsa_switch *ds = bus->priv;
31
32         if (ds->phys_mii_mask & (1 << addr))
33                 return ds->ops->phy_read(ds, addr, reg);
34
35         return 0xffff;
36 }
37
38 static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
39 {
40         struct dsa_switch *ds = bus->priv;
41
42         if (ds->phys_mii_mask & (1 << addr))
43                 return ds->ops->phy_write(ds, addr, reg, val);
44
45         return 0;
46 }
47
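/* Fill in the ops, identifiers, parent device and PHY scan mask of the slave
 * MII bus; allocating and registering the bus is left to the caller.
 */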
48 void dsa_slave_mii_bus_init(struct dsa_switch *ds)
49 {
50         ds->slave_mii_bus->priv = (void *)ds;
51         ds->slave_mii_bus->name = "dsa slave smi";
52         ds->slave_mii_bus->read = dsa_slave_phy_read;
53         ds->slave_mii_bus->write = dsa_slave_phy_write;
54         snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
55                  ds->dst->index, ds->index);
56         ds->slave_mii_bus->parent = ds->dev;
57         ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
58 }
59
60
61 /* slave device handling ****************************************************/
62 static int dsa_slave_get_iflink(const struct net_device *dev)
63 {
64         return dsa_slave_to_master(dev)->ifindex;
65 }
66
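/* ndo_open: the master (CPU) interface must already be up. Mirror the slave's
 * unicast address and RX flags onto the master, then enable the switch port
 * and start phylink.
 */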
67 static int dsa_slave_open(struct net_device *dev)
68 {
69         struct net_device *master = dsa_slave_to_master(dev);
70         struct dsa_port *dp = dsa_slave_to_port(dev);
71         int err;
72
73         if (!(master->flags & IFF_UP))
74                 return -ENETDOWN;
75
76         if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
77                 err = dev_uc_add(master, dev->dev_addr);
78                 if (err < 0)
79                         goto out;
80         }
81
82         if (dev->flags & IFF_ALLMULTI) {
83                 err = dev_set_allmulti(master, 1);
84                 if (err < 0)
85                         goto del_unicast;
86         }
87         if (dev->flags & IFF_PROMISC) {
88                 err = dev_set_promiscuity(master, 1);
89                 if (err < 0)
90                         goto clear_allmulti;
91         }
92
93         err = dsa_port_enable(dp, dev->phydev);
94         if (err)
95                 goto clear_promisc;
96
97         phylink_start(dp->pl);
98
99         return 0;
100
101 clear_promisc:
102         if (dev->flags & IFF_PROMISC)
103                 dev_set_promiscuity(master, -1);
104 clear_allmulti:
105         if (dev->flags & IFF_ALLMULTI)
106                 dev_set_allmulti(master, -1);
107 del_unicast:
108         if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
109                 dev_uc_del(master, dev->dev_addr);
110 out:
111         return err;
112 }
113
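/* ndo_stop: flush deferred transmissions, stop phylink, disable the switch
 * port and undo the address/flag changes made on the master by
 * dsa_slave_open().
 */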
114 static int dsa_slave_close(struct net_device *dev)
115 {
116         struct net_device *master = dsa_slave_to_master(dev);
117         struct dsa_port *dp = dsa_slave_to_port(dev);
118
119         cancel_work_sync(&dp->xmit_work);
120         skb_queue_purge(&dp->xmit_queue);
121
122         phylink_stop(dp->pl);
123
124         dsa_port_disable(dp);
125
126         dev_mc_unsync(master, dev);
127         dev_uc_unsync(master, dev);
128         if (dev->flags & IFF_ALLMULTI)
129                 dev_set_allmulti(master, -1);
130         if (dev->flags & IFF_PROMISC)
131                 dev_set_promiscuity(master, -1);
132
133         if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
134                 dev_uc_del(master, dev->dev_addr);
135
136         return 0;
137 }
138
139 static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
140 {
141         struct net_device *master = dsa_slave_to_master(dev);
142         if (dev->flags & IFF_UP) {
143                 if (change & IFF_ALLMULTI)
144                         dev_set_allmulti(master,
145                                          dev->flags & IFF_ALLMULTI ? 1 : -1);
146                 if (change & IFF_PROMISC)
147                         dev_set_promiscuity(master,
148                                             dev->flags & IFF_PROMISC ? 1 : -1);
149         }
150 }
151
152 static void dsa_slave_set_rx_mode(struct net_device *dev)
153 {
154         struct net_device *master = dsa_slave_to_master(dev);
155
156         dev_mc_sync(master, dev);
157         dev_uc_sync(master, dev);
158 }
159
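/* Changing the slave's MAC address only needs to touch the master's unicast
 * filter while the slave is up; the address itself is stored on the slave.
 */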
160 static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
161 {
162         struct net_device *master = dsa_slave_to_master(dev);
163         struct sockaddr *addr = a;
164         int err;
165
166         if (!is_valid_ether_addr(addr->sa_data))
167                 return -EADDRNOTAVAIL;
168
169         if (!(dev->flags & IFF_UP))
170                 goto out;
171
172         if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
173                 err = dev_uc_add(master, addr->sa_data);
174                 if (err < 0)
175                         return err;
176         }
177
178         if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
179                 dev_uc_del(master, dev->dev_addr);
180
181 out:
182         ether_addr_copy(dev->dev_addr, addr->sa_data);
183
184         return 0;
185 }
186
187 struct dsa_slave_dump_ctx {
188         struct net_device *dev;
189         struct sk_buff *skb;
190         struct netlink_callback *cb;
191         int idx;
192 };
193
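/* Callback for dsa_port_fdb_dump(): emit one RTM_NEWNEIGH message per FDB
 * entry into the netlink dump skb, honouring the resume index in cb->args[2].
 */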
194 static int
195 dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
196                            bool is_static, void *data)
197 {
198         struct dsa_slave_dump_ctx *dump = data;
199         u32 portid = NETLINK_CB(dump->cb->skb).portid;
200         u32 seq = dump->cb->nlh->nlmsg_seq;
201         struct nlmsghdr *nlh;
202         struct ndmsg *ndm;
203
204         if (dump->idx < dump->cb->args[2])
205                 goto skip;
206
207         nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
208                         sizeof(*ndm), NLM_F_MULTI);
209         if (!nlh)
210                 return -EMSGSIZE;
211
212         ndm = nlmsg_data(nlh);
213         ndm->ndm_family  = AF_BRIDGE;
214         ndm->ndm_pad1    = 0;
215         ndm->ndm_pad2    = 0;
216         ndm->ndm_flags   = NTF_SELF;
217         ndm->ndm_type    = 0;
218         ndm->ndm_ifindex = dump->dev->ifindex;
219         ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;
220
221         if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
222                 goto nla_put_failure;
223
224         if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
225                 goto nla_put_failure;
226
227         nlmsg_end(dump->skb, nlh);
228
229 skip:
230         dump->idx++;
231         return 0;
232
233 nla_put_failure:
234         nlmsg_cancel(dump->skb, nlh);
235         return -EMSGSIZE;
236 }
237
238 static int
239 dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
240                    struct net_device *dev, struct net_device *filter_dev,
241                    int *idx)
242 {
243         struct dsa_port *dp = dsa_slave_to_port(dev);
244         struct dsa_slave_dump_ctx dump = {
245                 .dev = dev,
246                 .skb = skb,
247                 .cb = cb,
248                 .idx = *idx,
249         };
250         int err;
251
252         err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
253         *idx = dump.idx;
254
255         return err;
256 }
257
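/* Hardware timestamping ioctls are passed to the switch driver; everything
 * else is handled by phylink and the attached PHY.
 */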
258 static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
259 {
260         struct dsa_slave_priv *p = netdev_priv(dev);
261         struct dsa_switch *ds = p->dp->ds;
262         int port = p->dp->index;
263
264         /* Pass through to switch driver if it supports timestamping */
265         switch (cmd) {
266         case SIOCGHWTSTAMP:
267                 if (ds->ops->port_hwtstamp_get)
268                         return ds->ops->port_hwtstamp_get(ds, port, ifr);
269                 break;
270         case SIOCSHWTSTAMP:
271                 if (ds->ops->port_hwtstamp_set)
272                         return ds->ops->port_hwtstamp_set(ds, port, ifr);
273                 break;
274         }
275
276         return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
277 }
278
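/* switchdev port attribute handler: map bridge attributes (STP state, VLAN
 * filtering, ageing time, bridge port flags, multicast router) onto the
 * corresponding dsa_port operations.
 */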
279 static int dsa_slave_port_attr_set(struct net_device *dev,
280                                    const struct switchdev_attr *attr,
281                                    struct switchdev_trans *trans)
282 {
283         struct dsa_port *dp = dsa_slave_to_port(dev);
284         int ret;
285
286         switch (attr->id) {
287         case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
288                 ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
289                 break;
290         case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
291                 ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
292                                               trans);
293                 break;
294         case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
295                 ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
296                 break;
297         case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
298                 ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
299                                                 trans);
300                 break;
301         case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
302                 ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, trans);
303                 break;
304         case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
305                 ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, trans);
306                 break;
307         default:
308                 ret = -EOPNOTSUPP;
309                 break;
310         }
311
312         return ret;
313 }
314
315 static int dsa_slave_port_obj_add(struct net_device *dev,
316                                   const struct switchdev_obj *obj,
317                                   struct switchdev_trans *trans,
318                                   struct netlink_ext_ack *extack)
319 {
320         struct dsa_port *dp = dsa_slave_to_port(dev);
321         int err;
322
323         /* For the prepare phase, ensure the full set of changes is feasible in
324          * one go in order to signal a failure properly. If an operation is not
325          * supported, return -EOPNOTSUPP.
326          */
327
328         switch (obj->id) {
329         case SWITCHDEV_OBJ_ID_PORT_MDB:
330                 if (obj->orig_dev != dev)
331                         return -EOPNOTSUPP;
332                 err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
333                 break;
334         case SWITCHDEV_OBJ_ID_HOST_MDB:
335                 /* DSA can directly translate this to a normal MDB add,
336                  * but on the CPU port.
337                  */
338                 err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj),
339                                        trans);
340                 break;
341         case SWITCHDEV_OBJ_ID_PORT_VLAN:
342                 if (obj->orig_dev != dev)
343                         return -EOPNOTSUPP;
344                 err = dsa_port_vlan_add(dp, SWITCHDEV_OBJ_PORT_VLAN(obj),
345                                         trans);
346                 break;
347         default:
348                 err = -EOPNOTSUPP;
349                 break;
350         }
351
352         return err;
353 }
354
355 static int dsa_slave_port_obj_del(struct net_device *dev,
356                                   const struct switchdev_obj *obj)
357 {
358         struct dsa_port *dp = dsa_slave_to_port(dev);
359         int err;
360
361         switch (obj->id) {
362         case SWITCHDEV_OBJ_ID_PORT_MDB:
363                 if (obj->orig_dev != dev)
364                         return -EOPNOTSUPP;
365                 err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
366                 break;
367         case SWITCHDEV_OBJ_ID_HOST_MDB:
368                 /* DSA can directly translate this to a normal MDB del,
369                  * but on the CPU port.
370                  */
371                 err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
372                 break;
373         case SWITCHDEV_OBJ_ID_PORT_VLAN:
374                 if (obj->orig_dev != dev)
375                         return -EOPNOTSUPP;
376                 err = dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
377                 break;
378         default:
379                 err = -EOPNOTSUPP;
380                 break;
381         }
382
383         return err;
384 }
385
386 static int dsa_slave_get_port_parent_id(struct net_device *dev,
387                                         struct netdev_phys_item_id *ppid)
388 {
389         struct dsa_port *dp = dsa_slave_to_port(dev);
390         struct dsa_switch *ds = dp->ds;
391         struct dsa_switch_tree *dst = ds->dst;
392
393         /* For non-legacy ports, devlink is used and it takes
394          * care of reporting the port parent ID. This ndo implementation
395          * should be removed with legacy support.
396          */
397         if (dp->ds->devlink)
398                 return -EOPNOTSUPP;
399
400         ppid->id_len = sizeof(dst->index);
401         memcpy(&ppid->id, &dst->index, ppid->id_len);
402
403         return 0;
404 }
405
406 static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
407                                                      struct sk_buff *skb)
408 {
409 #ifdef CONFIG_NET_POLL_CONTROLLER
410         struct dsa_slave_priv *p = netdev_priv(dev);
411
412         if (p->netpoll)
413                 netpoll_send_skb(p->netpoll, skb);
414 #else
415         BUG();
416 #endif
417         return NETDEV_TX_OK;
418 }
419
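/* For PTP frames, hand a clone of the skb to the switch driver through
 * port_txtstamp() so it can deliver the TX timestamp on it later; the clone
 * is freed if the driver declines it.
 */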
420 static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
421                                  struct sk_buff *skb)
422 {
423         struct dsa_switch *ds = p->dp->ds;
424         struct sk_buff *clone;
425         unsigned int type;
426
427         type = ptp_classify_raw(skb);
428         if (type == PTP_CLASS_NONE)
429                 return;
430
431         if (!ds->ops->port_txtstamp)
432                 return;
433
434         clone = skb_clone_sk(skb);
435         if (!clone)
436                 return;
437
438         DSA_SKB_CB(skb)->clone = clone;
439
440         if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type))
441                 return;
442
443         kfree_skb(clone);
444 }
445
446 netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
447 {
448         /* SKBs for netpoll still need to be mangled with the protocol-specific
449          * tag to be successfully transmitted.
450          */
451         if (unlikely(netpoll_tx_running(dev)))
452                 return dsa_slave_netpoll_send_skb(dev, skb);
453
454         /* Queue the SKB for transmission on the parent interface, but
455          * do not modify its EtherType
456          */
457         skb->dev = dsa_slave_to_master(dev);
458         dev_queue_xmit(skb);
459
460         return NETDEV_TX_OK;
461 }
462 EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
463
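/* ndo_start_xmit: update the per-CPU TX counters, handle PTP timestamping,
 * let the tagger add the switch tag, then hand the frame to the master
 * interface via dsa_enqueue_skb().
 */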
464 static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
465 {
466         struct dsa_slave_priv *p = netdev_priv(dev);
467         struct pcpu_sw_netstats *s;
468         struct sk_buff *nskb;
469
470         s = this_cpu_ptr(p->stats64);
471         u64_stats_update_begin(&s->syncp);
472         s->tx_packets++;
473         s->tx_bytes += skb->len;
474         u64_stats_update_end(&s->syncp);
475
476         DSA_SKB_CB(skb)->deferred_xmit = false;
477         DSA_SKB_CB(skb)->clone = NULL;
478
479         /* Identify PTP protocol packets, clone them, and pass them to the
480          * switch driver
481          */
482         dsa_skb_tx_timestamp(p, skb);
483
484         /* Transmit function may have to reallocate the original SKB,
485          * in which case it must have freed it. Only free it here on error.
486          */
487         nskb = p->xmit(skb, dev);
488         if (!nskb) {
489                 if (!DSA_SKB_CB(skb)->deferred_xmit)
490                         kfree_skb(skb);
491                 return NETDEV_TX_OK;
492         }
493
494         return dsa_enqueue_skb(nskb, dev);
495 }
496
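/* For taggers that cannot transmit from the xmit hot path: queue the skb on
 * the port and let dsa_port_xmit_work() pass it to the switch driver's
 * port_deferred_xmit() from process context.
 */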
497 void *dsa_defer_xmit(struct sk_buff *skb, struct net_device *dev)
498 {
499         struct dsa_port *dp = dsa_slave_to_port(dev);
500
501         DSA_SKB_CB(skb)->deferred_xmit = true;
502
503         skb_queue_tail(&dp->xmit_queue, skb);
504         schedule_work(&dp->xmit_work);
505         return NULL;
506 }
507 EXPORT_SYMBOL_GPL(dsa_defer_xmit);
508
509 static void dsa_port_xmit_work(struct work_struct *work)
510 {
511         struct dsa_port *dp = container_of(work, struct dsa_port, xmit_work);
512         struct dsa_switch *ds = dp->ds;
513         struct sk_buff *skb;
514
515         if (unlikely(!ds->ops->port_deferred_xmit))
516                 return;
517
518         while ((skb = skb_dequeue(&dp->xmit_queue)) != NULL)
519                 ds->ops->port_deferred_xmit(ds, dp->index, skb);
520 }
521
522 /* ethtool operations *******************************************************/
523
524 static void dsa_slave_get_drvinfo(struct net_device *dev,
525                                   struct ethtool_drvinfo *drvinfo)
526 {
527         strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
528         strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
529         strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
530 }
531
532 static int dsa_slave_get_regs_len(struct net_device *dev)
533 {
534         struct dsa_port *dp = dsa_slave_to_port(dev);
535         struct dsa_switch *ds = dp->ds;
536
537         if (ds->ops->get_regs_len)
538                 return ds->ops->get_regs_len(ds, dp->index);
539
540         return -EOPNOTSUPP;
541 }
542
543 static void
544 dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
545 {
546         struct dsa_port *dp = dsa_slave_to_port(dev);
547         struct dsa_switch *ds = dp->ds;
548
549         if (ds->ops->get_regs)
550                 ds->ops->get_regs(ds, dp->index, regs, _p);
551 }
552
553 static int dsa_slave_nway_reset(struct net_device *dev)
554 {
555         struct dsa_port *dp = dsa_slave_to_port(dev);
556
557         return phylink_ethtool_nway_reset(dp->pl);
558 }
559
560 static int dsa_slave_get_eeprom_len(struct net_device *dev)
561 {
562         struct dsa_port *dp = dsa_slave_to_port(dev);
563         struct dsa_switch *ds = dp->ds;
564
565         if (ds->cd && ds->cd->eeprom_len)
566                 return ds->cd->eeprom_len;
567
568         if (ds->ops->get_eeprom_len)
569                 return ds->ops->get_eeprom_len(ds);
570
571         return 0;
572 }
573
574 static int dsa_slave_get_eeprom(struct net_device *dev,
575                                 struct ethtool_eeprom *eeprom, u8 *data)
576 {
577         struct dsa_port *dp = dsa_slave_to_port(dev);
578         struct dsa_switch *ds = dp->ds;
579
580         if (ds->ops->get_eeprom)
581                 return ds->ops->get_eeprom(ds, eeprom, data);
582
583         return -EOPNOTSUPP;
584 }
585
586 static int dsa_slave_set_eeprom(struct net_device *dev,
587                                 struct ethtool_eeprom *eeprom, u8 *data)
588 {
589         struct dsa_port *dp = dsa_slave_to_port(dev);
590         struct dsa_switch *ds = dp->ds;
591
592         if (ds->ops->set_eeprom)
593                 return ds->ops->set_eeprom(ds, eeprom, data);
594
595         return -EOPNOTSUPP;
596 }
597
598 static void dsa_slave_get_strings(struct net_device *dev,
599                                   uint32_t stringset, uint8_t *data)
600 {
601         struct dsa_port *dp = dsa_slave_to_port(dev);
602         struct dsa_switch *ds = dp->ds;
603
604         if (stringset == ETH_SS_STATS) {
605                 int len = ETH_GSTRING_LEN;
606
607                 strncpy(data, "tx_packets", len);
608                 strncpy(data + len, "tx_bytes", len);
609                 strncpy(data + 2 * len, "rx_packets", len);
610                 strncpy(data + 3 * len, "rx_bytes", len);
611                 if (ds->ops->get_strings)
612                         ds->ops->get_strings(ds, dp->index, stringset,
613                                              data + 4 * len);
614         }
615 }
616
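/* The first four slots are the software-maintained per-CPU counters named by
 * dsa_slave_get_strings(); hardware counters from the driver follow at
 * data + 4.
 */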
617 static void dsa_slave_get_ethtool_stats(struct net_device *dev,
618                                         struct ethtool_stats *stats,
619                                         uint64_t *data)
620 {
621         struct dsa_port *dp = dsa_slave_to_port(dev);
622         struct dsa_slave_priv *p = netdev_priv(dev);
623         struct dsa_switch *ds = dp->ds;
624         struct pcpu_sw_netstats *s;
625         unsigned int start;
626         int i;
627
628         for_each_possible_cpu(i) {
629                 u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
630
631                 s = per_cpu_ptr(p->stats64, i);
632                 do {
633                         start = u64_stats_fetch_begin_irq(&s->syncp);
634                         tx_packets = s->tx_packets;
635                         tx_bytes = s->tx_bytes;
636                         rx_packets = s->rx_packets;
637                         rx_bytes = s->rx_bytes;
638                 } while (u64_stats_fetch_retry_irq(&s->syncp, start));
639                 data[0] += tx_packets;
640                 data[1] += tx_bytes;
641                 data[2] += rx_packets;
642                 data[3] += rx_bytes;
643         }
644         if (ds->ops->get_ethtool_stats)
645                 ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
646 }
647
648 static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
649 {
650         struct dsa_port *dp = dsa_slave_to_port(dev);
651         struct dsa_switch *ds = dp->ds;
652
653         if (sset == ETH_SS_STATS) {
654                 int count;
655
656                 count = 4;
657                 if (ds->ops->get_sset_count)
658                         count += ds->ops->get_sset_count(ds, dp->index, sset);
659
660                 return count;
661         }
662
663         return -EOPNOTSUPP;
664 }
665
666 static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
667 {
668         struct dsa_port *dp = dsa_slave_to_port(dev);
669         struct dsa_switch *ds = dp->ds;
670
671         phylink_ethtool_get_wol(dp->pl, w);
672
673         if (ds->ops->get_wol)
674                 ds->ops->get_wol(ds, dp->index, w);
675 }
676
677 static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
678 {
679         struct dsa_port *dp = dsa_slave_to_port(dev);
680         struct dsa_switch *ds = dp->ds;
681         int ret = -EOPNOTSUPP;
682
683         phylink_ethtool_set_wol(dp->pl, w);
684
685         if (ds->ops->set_wol)
686                 ret = ds->ops->set_wol(ds, dp->index, w);
687
688         return ret;
689 }
690
691 static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
692 {
693         struct dsa_port *dp = dsa_slave_to_port(dev);
694         struct dsa_switch *ds = dp->ds;
695         int ret;
696
697         /* Port's PHY and MAC both need to be EEE capable */
698         if (!dev->phydev || !dp->pl)
699                 return -ENODEV;
700
701         if (!ds->ops->set_mac_eee)
702                 return -EOPNOTSUPP;
703
704         ret = ds->ops->set_mac_eee(ds, dp->index, e);
705         if (ret)
706                 return ret;
707
708         return phylink_ethtool_set_eee(dp->pl, e);
709 }
710
711 static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
712 {
713         struct dsa_port *dp = dsa_slave_to_port(dev);
714         struct dsa_switch *ds = dp->ds;
715         int ret;
716
717         /* Port's PHY and MAC both need to be EEE capable */
718         if (!dev->phydev || !dp->pl)
719                 return -ENODEV;
720
721         if (!ds->ops->get_mac_eee)
722                 return -EOPNOTSUPP;
723
724         ret = ds->ops->get_mac_eee(ds, dp->index, e);
725         if (ret)
726                 return ret;
727
728         return phylink_ethtool_get_eee(dp->pl, e);
729 }
730
731 static int dsa_slave_get_link_ksettings(struct net_device *dev,
732                                         struct ethtool_link_ksettings *cmd)
733 {
734         struct dsa_port *dp = dsa_slave_to_port(dev);
735
736         return phylink_ethtool_ksettings_get(dp->pl, cmd);
737 }
738
739 static int dsa_slave_set_link_ksettings(struct net_device *dev,
740                                         const struct ethtool_link_ksettings *cmd)
741 {
742         struct dsa_port *dp = dsa_slave_to_port(dev);
743
744         return phylink_ethtool_ksettings_set(dp->pl, cmd);
745 }
746
747 #ifdef CONFIG_NET_POLL_CONTROLLER
748 static int dsa_slave_netpoll_setup(struct net_device *dev,
749                                    struct netpoll_info *ni)
750 {
751         struct net_device *master = dsa_slave_to_master(dev);
752         struct dsa_slave_priv *p = netdev_priv(dev);
753         struct netpoll *netpoll;
754         int err = 0;
755
756         netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
757         if (!netpoll)
758                 return -ENOMEM;
759
760         err = __netpoll_setup(netpoll, master);
761         if (err) {
762                 kfree(netpoll);
763                 goto out;
764         }
765
766         p->netpoll = netpoll;
767 out:
768         return err;
769 }
770
771 static void dsa_slave_netpoll_cleanup(struct net_device *dev)
772 {
773         struct dsa_slave_priv *p = netdev_priv(dev);
774         struct netpoll *netpoll = p->netpoll;
775
776         if (!netpoll)
777                 return;
778
779         p->netpoll = NULL;
780
781         __netpoll_free(netpoll);
782 }
783
784 static void dsa_slave_poll_controller(struct net_device *dev)
785 {
786 }
787 #endif
788
789 static int dsa_slave_get_phys_port_name(struct net_device *dev,
790                                         char *name, size_t len)
791 {
792         struct dsa_port *dp = dsa_slave_to_port(dev);
793
794         /* For non-legacy ports, devlink is used and it takes
795          * care of the name generation. This ndo implementation
796          * should be removed with legacy support.
797          */
798         if (dp->ds->devlink)
799                 return -EOPNOTSUPP;
800
801         if (snprintf(name, len, "p%d", dp->index) >= len)
802                 return -EINVAL;
803
804         return 0;
805 }
806
807 static struct dsa_mall_tc_entry *
808 dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
809 {
810         struct dsa_slave_priv *p = netdev_priv(dev);
811         struct dsa_mall_tc_entry *mall_tc_entry;
812
813         list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
814                 if (mall_tc_entry->cookie == cookie)
815                         return mall_tc_entry;
816
817         return NULL;
818 }
819
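/* Offload a matchall classifier: only a single mirred (port mirroring) action
 * targeting another DSA slave is supported, programmed through the driver's
 * port_mirror_add() operation.
 */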
820 static int dsa_slave_add_cls_matchall(struct net_device *dev,
821                                       struct tc_cls_matchall_offload *cls,
822                                       bool ingress)
823 {
824         struct dsa_port *dp = dsa_slave_to_port(dev);
825         struct dsa_slave_priv *p = netdev_priv(dev);
826         struct dsa_mall_tc_entry *mall_tc_entry;
827         __be16 protocol = cls->common.protocol;
828         struct dsa_switch *ds = dp->ds;
829         struct flow_action_entry *act;
830         struct dsa_port *to_dp;
831         int err = -EOPNOTSUPP;
832
833         if (!ds->ops->port_mirror_add)
834                 return err;
835
836         if (!flow_offload_has_one_action(&cls->rule->action))
837                 return err;
838
839         act = &cls->rule->action.entries[0];
840
841         if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
842                 struct dsa_mall_mirror_tc_entry *mirror;
843
844                 if (!act->dev)
845                         return -EINVAL;
846
847                 if (!dsa_slave_dev_check(act->dev))
848                         return -EOPNOTSUPP;
849
850                 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
851                 if (!mall_tc_entry)
852                         return -ENOMEM;
853
854                 mall_tc_entry->cookie = cls->cookie;
855                 mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
856                 mirror = &mall_tc_entry->mirror;
857
858                 to_dp = dsa_slave_to_port(act->dev);
859
860                 mirror->to_local_port = to_dp->index;
861                 mirror->ingress = ingress;
862
863                 err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
864                 if (err) {
865                         kfree(mall_tc_entry);
866                         return err;
867                 }
868
869                 list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
870         }
871
872         return 0;
873 }
874
875 static void dsa_slave_del_cls_matchall(struct net_device *dev,
876                                        struct tc_cls_matchall_offload *cls)
877 {
878         struct dsa_port *dp = dsa_slave_to_port(dev);
879         struct dsa_mall_tc_entry *mall_tc_entry;
880         struct dsa_switch *ds = dp->ds;
881
882         if (!ds->ops->port_mirror_del)
883                 return;
884
885         mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
886         if (!mall_tc_entry)
887                 return;
888
889         list_del(&mall_tc_entry->list);
890
891         switch (mall_tc_entry->type) {
892         case DSA_PORT_MALL_MIRROR:
893                 ds->ops->port_mirror_del(ds, dp->index, &mall_tc_entry->mirror);
894                 break;
895         default:
896                 WARN_ON(1);
897         }
898
899         kfree(mall_tc_entry);
900 }
901
902 static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
903                                            struct tc_cls_matchall_offload *cls,
904                                            bool ingress)
905 {
906         if (cls->common.chain_index)
907                 return -EOPNOTSUPP;
908
909         switch (cls->command) {
910         case TC_CLSMATCHALL_REPLACE:
911                 return dsa_slave_add_cls_matchall(dev, cls, ingress);
912         case TC_CLSMATCHALL_DESTROY:
913                 dsa_slave_del_cls_matchall(dev, cls);
914                 return 0;
915         default:
916                 return -EOPNOTSUPP;
917         }
918 }
919
920 static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
921                                        void *cb_priv, bool ingress)
922 {
923         struct net_device *dev = cb_priv;
924
925         if (!tc_can_offload(dev))
926                 return -EOPNOTSUPP;
927
928         switch (type) {
929         case TC_SETUP_CLSMATCHALL:
930                 return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
931         default:
932                 return -EOPNOTSUPP;
933         }
934 }
935
936 static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
937                                           void *type_data, void *cb_priv)
938 {
939         return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
940 }
941
942 static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
943                                           void *type_data, void *cb_priv)
944 {
945         return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
946 }
947
948 static LIST_HEAD(dsa_slave_block_cb_list);
949
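/* Bind or unbind a flow block: ingress and egress clsact blocks each get
 * their own callback so matchall rules know which direction they apply to.
 */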
950 static int dsa_slave_setup_tc_block(struct net_device *dev,
951                                     struct flow_block_offload *f)
952 {
953         struct flow_block_cb *block_cb;
954         flow_setup_cb_t *cb;
955
956         if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
957                 cb = dsa_slave_setup_tc_block_cb_ig;
958         else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
959                 cb = dsa_slave_setup_tc_block_cb_eg;
960         else
961                 return -EOPNOTSUPP;
962
963         f->driver_block_list = &dsa_slave_block_cb_list;
964
965         switch (f->command) {
966         case FLOW_BLOCK_BIND:
967                 if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
968                         return -EBUSY;
969
970                 block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
971                 if (IS_ERR(block_cb))
972                         return PTR_ERR(block_cb);
973
974                 flow_block_cb_add(block_cb, f);
975                 list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
976                 return 0;
977         case FLOW_BLOCK_UNBIND:
978                 block_cb = flow_block_cb_lookup(f->block, cb, dev);
979                 if (!block_cb)
980                         return -ENOENT;
981
982                 flow_block_cb_remove(block_cb, f);
983                 list_del(&block_cb->driver_list);
984                 return 0;
985         default:
986                 return -EOPNOTSUPP;
987         }
988 }
989
990 static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
991                               void *type_data)
992 {
993         switch (type) {
994         case TC_SETUP_BLOCK:
995                 return dsa_slave_setup_tc_block(dev, type_data);
996         default:
997                 return -EOPNOTSUPP;
998         }
999 }
1000
1001 static void dsa_slave_get_stats64(struct net_device *dev,
1002                                   struct rtnl_link_stats64 *stats)
1003 {
1004         struct dsa_slave_priv *p = netdev_priv(dev);
1005         struct pcpu_sw_netstats *s;
1006         unsigned int start;
1007         int i;
1008
1009         netdev_stats_to_stats64(stats, &dev->stats);
1010         for_each_possible_cpu(i) {
1011                 u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
1012
1013                 s = per_cpu_ptr(p->stats64, i);
1014                 do {
1015                         start = u64_stats_fetch_begin_irq(&s->syncp);
1016                         tx_packets = s->tx_packets;
1017                         tx_bytes = s->tx_bytes;
1018                         rx_packets = s->rx_packets;
1019                         rx_bytes = s->rx_bytes;
1020                 } while (u64_stats_fetch_retry_irq(&s->syncp, start));
1021
1022                 stats->tx_packets += tx_packets;
1023                 stats->tx_bytes += tx_bytes;
1024                 stats->rx_packets += rx_packets;
1025                 stats->rx_bytes += rx_bytes;
1026         }
1027 }
1028
1029 static int dsa_slave_get_rxnfc(struct net_device *dev,
1030                                struct ethtool_rxnfc *nfc, u32 *rule_locs)
1031 {
1032         struct dsa_port *dp = dsa_slave_to_port(dev);
1033         struct dsa_switch *ds = dp->ds;
1034
1035         if (!ds->ops->get_rxnfc)
1036                 return -EOPNOTSUPP;
1037
1038         return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
1039 }
1040
1041 static int dsa_slave_set_rxnfc(struct net_device *dev,
1042                                struct ethtool_rxnfc *nfc)
1043 {
1044         struct dsa_port *dp = dsa_slave_to_port(dev);
1045         struct dsa_switch *ds = dp->ds;
1046
1047         if (!ds->ops->set_rxnfc)
1048                 return -EOPNOTSUPP;
1049
1050         return ds->ops->set_rxnfc(ds, dp->index, nfc);
1051 }
1052
1053 static int dsa_slave_get_ts_info(struct net_device *dev,
1054                                  struct ethtool_ts_info *ts)
1055 {
1056         struct dsa_slave_priv *p = netdev_priv(dev);
1057         struct dsa_switch *ds = p->dp->ds;
1058
1059         if (!ds->ops->get_ts_info)
1060                 return -EOPNOTSUPP;
1061
1062         return ds->ops->get_ts_info(ds, p->dp->index, ts);
1063 }
1064
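/* NETIF_F_HW_VLAN_CTAG_FILTER handler: program a VLAN added on top of the
 * slave into the switch as a tagged, non-PVID entry, unless the VID is
 * already managed through the bridge.
 */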
1065 static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
1066                                      u16 vid)
1067 {
1068         struct dsa_port *dp = dsa_slave_to_port(dev);
1069         struct bridge_vlan_info info;
1070         int ret;
1071
1072         /* Check for a possible bridge VLAN entry now since there is no
1073          * need to emulate the switchdev prepare + commit phase.
1074          */
1075         if (dp->bridge_dev) {
1076                 /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
1077                  * device or the VID is not found, respectively. A return
1078                  * value of 0 means success, which is a failure for us here.
1079                  */
1080                 ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
1081                 if (ret == 0)
1082                         return -EBUSY;
1083         }
1084
1085         /* This API only allows programming tagged, non-PVID VIDs */
1086         return dsa_port_vid_add(dp, vid, 0);
1087 }
1088
1089 static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
1090                                       u16 vid)
1091 {
1092         struct dsa_port *dp = dsa_slave_to_port(dev);
1093         struct bridge_vlan_info info;
1094         int ret;
1095
1096         /* Check for a possible bridge VLAN entry now since there is no
1097          * need to emulate the switchdev prepare + commit phase.
1098          */
1099         if (dp->bridge_dev) {
1100                 /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
1101                  * device or the VID is not found, respectively. A return
1102                  * value of 0 means success, which is a failure for us here.
1103                  */
1104                 ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
1105                 if (ret == 0)
1106                         return -EBUSY;
1107         }
1108
1109         ret = dsa_port_vid_del(dp, vid);
1110         if (ret == -EOPNOTSUPP)
1111                 ret = 0;
1112
1113         return ret;
1114 }
1115
1116 static const struct ethtool_ops dsa_slave_ethtool_ops = {
1117         .get_drvinfo            = dsa_slave_get_drvinfo,
1118         .get_regs_len           = dsa_slave_get_regs_len,
1119         .get_regs               = dsa_slave_get_regs,
1120         .nway_reset             = dsa_slave_nway_reset,
1121         .get_link               = ethtool_op_get_link,
1122         .get_eeprom_len         = dsa_slave_get_eeprom_len,
1123         .get_eeprom             = dsa_slave_get_eeprom,
1124         .set_eeprom             = dsa_slave_set_eeprom,
1125         .get_strings            = dsa_slave_get_strings,
1126         .get_ethtool_stats      = dsa_slave_get_ethtool_stats,
1127         .get_sset_count         = dsa_slave_get_sset_count,
1128         .set_wol                = dsa_slave_set_wol,
1129         .get_wol                = dsa_slave_get_wol,
1130         .set_eee                = dsa_slave_set_eee,
1131         .get_eee                = dsa_slave_get_eee,
1132         .get_link_ksettings     = dsa_slave_get_link_ksettings,
1133         .set_link_ksettings     = dsa_slave_set_link_ksettings,
1134         .get_rxnfc              = dsa_slave_get_rxnfc,
1135         .set_rxnfc              = dsa_slave_set_rxnfc,
1136         .get_ts_info            = dsa_slave_get_ts_info,
1137 };
1138
1139 /* legacy way, bypassing the bridge *****************************************/
1140 int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
1141                        struct net_device *dev,
1142                        const unsigned char *addr, u16 vid,
1143                        u16 flags,
1144                        struct netlink_ext_ack *extack)
1145 {
1146         struct dsa_port *dp = dsa_slave_to_port(dev);
1147
1148         return dsa_port_fdb_add(dp, addr, vid);
1149 }
1150
1151 int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
1152                        struct net_device *dev,
1153                        const unsigned char *addr, u16 vid)
1154 {
1155         struct dsa_port *dp = dsa_slave_to_port(dev);
1156
1157         return dsa_port_fdb_del(dp, addr, vid);
1158 }
1159
1160 static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
1161 {
1162         struct dsa_port *dp = dsa_slave_to_port(dev);
1163
1164         return dp->ds->devlink ? &dp->devlink_port : NULL;
1165 }
1166
1167 static const struct net_device_ops dsa_slave_netdev_ops = {
1168         .ndo_open               = dsa_slave_open,
1169         .ndo_stop               = dsa_slave_close,
1170         .ndo_start_xmit         = dsa_slave_xmit,
1171         .ndo_change_rx_flags    = dsa_slave_change_rx_flags,
1172         .ndo_set_rx_mode        = dsa_slave_set_rx_mode,
1173         .ndo_set_mac_address    = dsa_slave_set_mac_address,
1174         .ndo_fdb_add            = dsa_legacy_fdb_add,
1175         .ndo_fdb_del            = dsa_legacy_fdb_del,
1176         .ndo_fdb_dump           = dsa_slave_fdb_dump,
1177         .ndo_do_ioctl           = dsa_slave_ioctl,
1178         .ndo_get_iflink         = dsa_slave_get_iflink,
1179 #ifdef CONFIG_NET_POLL_CONTROLLER
1180         .ndo_netpoll_setup      = dsa_slave_netpoll_setup,
1181         .ndo_netpoll_cleanup    = dsa_slave_netpoll_cleanup,
1182         .ndo_poll_controller    = dsa_slave_poll_controller,
1183 #endif
1184         .ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
1185         .ndo_setup_tc           = dsa_slave_setup_tc,
1186         .ndo_get_stats64        = dsa_slave_get_stats64,
1187         .ndo_get_port_parent_id = dsa_slave_get_port_parent_id,
1188         .ndo_vlan_rx_add_vid    = dsa_slave_vlan_rx_add_vid,
1189         .ndo_vlan_rx_kill_vid   = dsa_slave_vlan_rx_kill_vid,
1190         .ndo_get_devlink_port   = dsa_slave_get_devlink_port,
1191 };
1192
1193 static struct device_type dsa_type = {
1194         .name   = "dsa",
1195 };
1196
1197 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
1198 {
1199         const struct dsa_port *dp = dsa_to_port(ds, port);
1200
1201         phylink_mac_change(dp->pl, up);
1202 }
1203 EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
1204
1205 static void dsa_slave_phylink_fixed_state(struct net_device *dev,
1206                                           struct phylink_link_state *state)
1207 {
1208         struct dsa_port *dp = dsa_slave_to_port(dev);
1209         struct dsa_switch *ds = dp->ds;
1210
1211         /* No need to check that this operation is valid; the callback would
1212          * not be called if it were not.
1213          */
1214         ds->ops->phylink_fixed_state(ds, dp->index, state);
1215 }
1216
1217 /* slave device setup *******************************************************/
1218 static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
1219 {
1220         struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1221         struct dsa_switch *ds = dp->ds;
1222
1223         slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
1224         if (!slave_dev->phydev) {
1225                 netdev_err(slave_dev, "no phy at %d\n", addr);
1226                 return -ENODEV;
1227         }
1228
1229         return phylink_connect_phy(dp->pl, slave_dev->phydev);
1230 }
1231
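/* Create the phylink instance for the port and connect it to the PHY
 * described in the device tree, falling back to the switch's internal MDIO
 * bus at the port's index when no PHY or fixed link is described there.
 */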
1232 static int dsa_slave_phy_setup(struct net_device *slave_dev)
1233 {
1234         struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1235         struct device_node *port_dn = dp->dn;
1236         struct dsa_switch *ds = dp->ds;
1237         u32 phy_flags = 0;
1238         int mode, ret;
1239
1240         mode = of_get_phy_mode(port_dn);
1241         if (mode < 0)
1242                 mode = PHY_INTERFACE_MODE_NA;
1243
1244         dp->pl_config.dev = &slave_dev->dev;
1245         dp->pl_config.type = PHYLINK_NETDEV;
1246
1247         dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
1248                                 &dsa_port_phylink_mac_ops);
1249         if (IS_ERR(dp->pl)) {
1250                 netdev_err(slave_dev,
1251                            "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
1252                 return PTR_ERR(dp->pl);
1253         }
1254
1255         /* Register only if the switch provides such a callback, since this
1256          * callback takes precedence over polling the link GPIO in PHYLINK
1257          * (see phylink_get_fixed_state).
1258          */
1259         if (ds->ops->phylink_fixed_state)
1260                 phylink_fixed_state_cb(dp->pl, dsa_slave_phylink_fixed_state);
1261
1262         if (ds->ops->get_phy_flags)
1263                 phy_flags = ds->ops->get_phy_flags(ds, dp->index);
1264
1265         ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
1266         if (ret == -ENODEV && ds->slave_mii_bus) {
1267                 /* We could not connect to a designated PHY or SFP, so try to
1268                  * use the switch internal MDIO bus instead
1269                  */
1270                 ret = dsa_slave_phy_connect(slave_dev, dp->index);
1271                 if (ret) {
1272                         netdev_err(slave_dev,
1273                                    "failed to connect to port %d: %d\n",
1274                                    dp->index, ret);
1275                         phylink_destroy(dp->pl);
1276                         return ret;
1277                 }
1278         }
1279
1280         return ret;
1281 }
1282
1283 static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
1284 static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
1285                                             struct netdev_queue *txq,
1286                                             void *_unused)
1287 {
1288         lockdep_set_class(&txq->_xmit_lock,
1289                           &dsa_slave_netdev_xmit_lock_key);
1290 }
1291
1292 int dsa_slave_suspend(struct net_device *slave_dev)
1293 {
1294         struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1295
1296         if (!netif_running(slave_dev))
1297                 return 0;
1298
1299         cancel_work_sync(&dp->xmit_work);
1300         skb_queue_purge(&dp->xmit_queue);
1301
1302         netif_device_detach(slave_dev);
1303
1304         rtnl_lock();
1305         phylink_stop(dp->pl);
1306         rtnl_unlock();
1307
1308         return 0;
1309 }
1310
1311 int dsa_slave_resume(struct net_device *slave_dev)
1312 {
1313         struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1314
1315         if (!netif_running(slave_dev))
1316                 return 0;
1317
1318         netif_device_attach(slave_dev);
1319
1320         rtnl_lock();
1321         phylink_start(dp->pl);
1322         rtnl_unlock();
1323
1324         return 0;
1325 }
1326
1327 static void dsa_slave_notify(struct net_device *dev, unsigned long val)
1328 {
1329         struct net_device *master = dsa_slave_to_master(dev);
1330         struct dsa_port *dp = dsa_slave_to_port(dev);
1331         struct dsa_notifier_register_info rinfo = {
1332                 .switch_number = dp->ds->index,
1333                 .port_number = dp->index,
1334                 .master = master,
1335                 .info.dev = dev,
1336         };
1337
1338         call_dsa_notifiers(val, dev, &rinfo.info);
1339 }
1340
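/* Allocate, configure and register the slave netdev for a user port. Features
 * and, when the port has no MAC address of its own, the MAC address are
 * inherited from the CPU port's master device.
 */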
1341 int dsa_slave_create(struct dsa_port *port)
1342 {
1343         const struct dsa_port *cpu_dp = port->cpu_dp;
1344         struct net_device *master = cpu_dp->master;
1345         struct dsa_switch *ds = port->ds;
1346         const char *name = port->name;
1347         struct net_device *slave_dev;
1348         struct dsa_slave_priv *p;
1349         int ret;
1350
1351         if (!ds->num_tx_queues)
1352                 ds->num_tx_queues = 1;
1353
1354         slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
1355                                      NET_NAME_UNKNOWN, ether_setup,
1356                                      ds->num_tx_queues, 1);
1357         if (slave_dev == NULL)
1358                 return -ENOMEM;
1359
1360         slave_dev->features = master->vlan_features | NETIF_F_HW_TC |
1361                                 NETIF_F_HW_VLAN_CTAG_FILTER;
1362         slave_dev->hw_features |= NETIF_F_HW_TC;
1363         slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
1364         if (!IS_ERR_OR_NULL(port->mac))
1365                 ether_addr_copy(slave_dev->dev_addr, port->mac);
1366         else
1367                 eth_hw_addr_inherit(slave_dev, master);
1368         slave_dev->priv_flags |= IFF_NO_QUEUE;
1369         slave_dev->netdev_ops = &dsa_slave_netdev_ops;
1370         slave_dev->min_mtu = 0;
1371         slave_dev->max_mtu = ETH_MAX_MTU;
1372         SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
1373
1374         netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
1375                                  NULL);
1376
1377         SET_NETDEV_DEV(slave_dev, port->ds->dev);
1378         slave_dev->dev.of_node = port->dn;
1379         slave_dev->vlan_features = master->vlan_features;
1380
1381         p = netdev_priv(slave_dev);
1382         p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1383         if (!p->stats64) {
1384                 free_netdev(slave_dev);
1385                 return -ENOMEM;
1386         }
1387         p->dp = port;
1388         INIT_LIST_HEAD(&p->mall_tc_list);
1389         INIT_WORK(&port->xmit_work, dsa_port_xmit_work);
1390         skb_queue_head_init(&port->xmit_queue);
1391         p->xmit = cpu_dp->tag_ops->xmit;
1392         port->slave = slave_dev;
1393
1394         netif_carrier_off(slave_dev);
1395
1396         ret = dsa_slave_phy_setup(slave_dev);
1397         if (ret) {
1398                 netdev_err(master, "error %d setting up slave phy\n", ret);
1399                 goto out_free;
1400         }
1401
1402         dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);
1403
1404         ret = register_netdev(slave_dev);
1405         if (ret) {
1406                 netdev_err(master, "error %d registering interface %s\n",
1407                            ret, slave_dev->name);
1408                 goto out_phy;
1409         }
1410
1411         return 0;
1412
1413 out_phy:
1414         rtnl_lock();
1415         phylink_disconnect_phy(p->dp->pl);
1416         rtnl_unlock();
1417         phylink_destroy(p->dp->pl);
1418 out_free:
1419         free_percpu(p->stats64);
1420         free_netdev(slave_dev);
1421         port->slave = NULL;
1422         return ret;
1423 }
1424
1425 void dsa_slave_destroy(struct net_device *slave_dev)
1426 {
1427         struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1428         struct dsa_slave_priv *p = netdev_priv(slave_dev);
1429
1430         netif_carrier_off(slave_dev);
1431         rtnl_lock();
1432         phylink_disconnect_phy(dp->pl);
1433         rtnl_unlock();
1434
1435         dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
1436         unregister_netdev(slave_dev);
1437         phylink_destroy(dp->pl);
1438         free_percpu(p->stats64);
1439         free_netdev(slave_dev);
1440 }
1441
1442 static bool dsa_slave_dev_check(const struct net_device *dev)
1443 {
1444         return dev->netdev_ops == &dsa_slave_netdev_ops;
1445 }
1446
1447 static int dsa_slave_changeupper(struct net_device *dev,
1448                                  struct netdev_notifier_changeupper_info *info)
1449 {
1450         struct dsa_port *dp = dsa_slave_to_port(dev);
1451         int err = NOTIFY_DONE;
1452
1453         if (netif_is_bridge_master(info->upper_dev)) {
1454                 if (info->linking) {
1455                         err = dsa_port_bridge_join(dp, info->upper_dev);
1456                         err = notifier_from_errno(err);
1457                 } else {
1458                         dsa_port_bridge_leave(dp, info->upper_dev);
1459                         err = NOTIFY_OK;
1460                 }
1461         }
1462
1463         return err;
1464 }
1465
1466 static int dsa_slave_upper_vlan_check(struct net_device *dev,
1467                                       struct netdev_notifier_changeupper_info *
1468                                       info)
1469 {
1470         struct netlink_ext_ack *ext_ack;
1471         struct net_device *slave;
1472         struct dsa_port *dp;
1473
1474         ext_ack = netdev_notifier_info_to_extack(&info->info);
1475
1476         if (!is_vlan_dev(dev))
1477                 return NOTIFY_DONE;
1478
1479         slave = vlan_dev_real_dev(dev);
1480         if (!dsa_slave_dev_check(slave))
1481                 return NOTIFY_DONE;
1482
1483         dp = dsa_slave_to_port(slave);
1484         if (!dp->bridge_dev)
1485                 return NOTIFY_DONE;
1486
1487         /* Deny enslaving a VLAN device into a VLAN-aware bridge */
1488         if (br_vlan_enabled(dp->bridge_dev) &&
1489             netif_is_bridge_master(info->upper_dev) && info->linking) {
1490                 NL_SET_ERR_MSG_MOD(ext_ack,
1491                                    "Cannot enslave VLAN device into VLAN aware bridge");
1492                 return notifier_from_errno(-EINVAL);
1493         }
1494
1495         return NOTIFY_DONE;
1496 }
1497
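/* netdevice notifier: on NETDEV_CHANGEUPPER, join or leave the bridge for DSA
 * slaves, and reject enslaving VLAN uppers of slaves into VLAN-aware bridges.
 */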
1498 static int dsa_slave_netdevice_event(struct notifier_block *nb,
1499                                      unsigned long event, void *ptr)
1500 {
1501         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1502
1503         if (event == NETDEV_CHANGEUPPER) {
1504                 if (!dsa_slave_dev_check(dev))
1505                         return dsa_slave_upper_vlan_check(dev, ptr);
1506
1507                 return dsa_slave_changeupper(dev, ptr);
1508         }
1509
1510         return NOTIFY_DONE;
1511 }
1512
1513 struct dsa_switchdev_event_work {
1514         struct work_struct work;
1515         struct switchdev_notifier_fdb_info fdb_info;
1516         struct net_device *dev;
1517         unsigned long event;
1518 };
1519
1520 static void dsa_slave_switchdev_event_work(struct work_struct *work)
1521 {
1522         struct dsa_switchdev_event_work *switchdev_work =
1523                 container_of(work, struct dsa_switchdev_event_work, work);
1524         struct net_device *dev = switchdev_work->dev;
1525         struct switchdev_notifier_fdb_info *fdb_info;
1526         struct dsa_port *dp = dsa_slave_to_port(dev);
1527         int err;
1528
1529         rtnl_lock();
1530         switch (switchdev_work->event) {
1531         case SWITCHDEV_FDB_ADD_TO_DEVICE:
1532                 fdb_info = &switchdev_work->fdb_info;
1533                 if (!fdb_info->added_by_user)
1534                         break;
1535
1536                 err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid);
1537                 if (err) {
1538                         netdev_dbg(dev, "fdb add failed err=%d\n", err);
1539                         break;
1540                 }
1541                 fdb_info->offloaded = true;
1542                 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
1543                                          &fdb_info->info, NULL);
1544                 break;
1545
1546         case SWITCHDEV_FDB_DEL_TO_DEVICE:
1547                 fdb_info = &switchdev_work->fdb_info;
1548                 if (!fdb_info->added_by_user)
1549                         break;
1550
1551                 err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid);
1552                 if (err) {
1553                         netdev_dbg(dev, "fdb del failed err=%d\n", err);
1554                         dev_close(dev);
1555                 }
1556                 break;
1557         }
1558         rtnl_unlock();
1559
1560         kfree(switchdev_work->fdb_info.addr);
1561         kfree(switchdev_work);
1562         dev_put(dev);
1563 }
1564
1565 static int
1566 dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work *
1567                                   switchdev_work,
1568                                   const struct switchdev_notifier_fdb_info *
1569                                   fdb_info)
1570 {
1571         memcpy(&switchdev_work->fdb_info, fdb_info,
1572                sizeof(switchdev_work->fdb_info));
1573         switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
1574         if (!switchdev_work->fdb_info.addr)
1575                 return -ENOMEM;
1576         ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
1577                         fdb_info->addr);
1578         return 0;
1579 }
1580
1581 /* Called under rcu_read_lock() */
1582 static int dsa_slave_switchdev_event(struct notifier_block *unused,
1583                                      unsigned long event, void *ptr)
1584 {
1585         struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
1586         struct dsa_switchdev_event_work *switchdev_work;
1587         int err;
1588
1589         if (event == SWITCHDEV_PORT_ATTR_SET) {
1590                 err = switchdev_handle_port_attr_set(dev, ptr,
1591                                                      dsa_slave_dev_check,
1592                                                      dsa_slave_port_attr_set);
1593                 return notifier_from_errno(err);
1594         }
1595
1596         if (!dsa_slave_dev_check(dev))
1597                 return NOTIFY_DONE;
1598
1599         switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
1600         if (!switchdev_work)
1601                 return NOTIFY_BAD;
1602
1603         INIT_WORK(&switchdev_work->work,
1604                   dsa_slave_switchdev_event_work);
1605         switchdev_work->dev = dev;
1606         switchdev_work->event = event;
1607
1608         switch (event) {
1609         case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
1610         case SWITCHDEV_FDB_DEL_TO_DEVICE:
1611                 if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr))
1612                         goto err_fdb_work_init;
1613                 dev_hold(dev);
1614                 break;
1615         default:
1616                 kfree(switchdev_work);
1617                 return NOTIFY_DONE;
1618         }
1619
1620         dsa_schedule_work(&switchdev_work->work);
1621         return NOTIFY_OK;
1622
1623 err_fdb_work_init:
1624         kfree(switchdev_work);
1625         return NOTIFY_BAD;
1626 }
1627
1628 static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
1629                                               unsigned long event, void *ptr)
1630 {
1631         struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
1632         int err;
1633
1634         switch (event) {
1635         case SWITCHDEV_PORT_OBJ_ADD:
1636                 err = switchdev_handle_port_obj_add(dev, ptr,
1637                                                     dsa_slave_dev_check,
1638                                                     dsa_slave_port_obj_add);
1639                 return notifier_from_errno(err);
1640         case SWITCHDEV_PORT_OBJ_DEL:
1641                 err = switchdev_handle_port_obj_del(dev, ptr,
1642                                                     dsa_slave_dev_check,
1643                                                     dsa_slave_port_obj_del);
1644                 return notifier_from_errno(err);
1645         case SWITCHDEV_PORT_ATTR_SET:
1646                 err = switchdev_handle_port_attr_set(dev, ptr,
1647                                                      dsa_slave_dev_check,
1648                                                      dsa_slave_port_attr_set);
1649                 return notifier_from_errno(err);
1650         }
1651
1652         return NOTIFY_DONE;
1653 }
1654
1655 static struct notifier_block dsa_slave_nb __read_mostly = {
1656         .notifier_call  = dsa_slave_netdevice_event,
1657 };
1658
1659 static struct notifier_block dsa_slave_switchdev_notifier = {
1660         .notifier_call = dsa_slave_switchdev_event,
1661 };
1662
1663 static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
1664         .notifier_call = dsa_slave_switchdev_blocking_event,
1665 };
1666
1667 int dsa_slave_register_notifier(void)
1668 {
1669         struct notifier_block *nb;
1670         int err;
1671
1672         err = register_netdevice_notifier(&dsa_slave_nb);
1673         if (err)
1674                 return err;
1675
1676         err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
1677         if (err)
1678                 goto err_switchdev_nb;
1679
1680         nb = &dsa_slave_switchdev_blocking_notifier;
1681         err = register_switchdev_blocking_notifier(nb);
1682         if (err)
1683                 goto err_switchdev_blocking_nb;
1684
1685         return 0;
1686
1687 err_switchdev_blocking_nb:
1688         unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
1689 err_switchdev_nb:
1690         unregister_netdevice_notifier(&dsa_slave_nb);
1691         return err;
1692 }
1693
1694 void dsa_slave_unregister_notifier(void)
1695 {
1696         struct notifier_block *nb;
1697         int err;
1698
1699         nb = &dsa_slave_switchdev_blocking_notifier;
1700         err = unregister_switchdev_blocking_notifier(nb);
1701         if (err)
1702                 pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
1703
1704         err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
1705         if (err)
1706                 pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
1707
1708         err = unregister_netdevice_notifier(&dsa_slave_nb);
1709         if (err)
1710                 pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
1711 }