de30763e1999e8d0c9bc5d2287c1cef01e452aeb
[sfrench/cifs-2.6.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <linux/notifier.h>
53 #include <linux/dcbnl.h>
54 #include <net/switchdev.h>
55 #include <generated/utsrelease.h>
56
57 #include "spectrum.h"
58 #include "core.h"
59 #include "reg.h"
60 #include "port.h"
61 #include "trap.h"
62 #include "txheader.h"
63
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* The MLXSW_ITEM32() invocations below define the bit fields of the Tx
 * header prepended to every packet sent to the device (byte offset,
 * LSB position, width in bits). Each expands to mlxsw_tx_hdr_<field>_set()
 * style accessors used by mlxsw_sp_txhdr_construct() below.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
134
135 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
136                                      const struct mlxsw_tx_info *tx_info)
137 {
138         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
139
140         memset(txhdr, 0, MLXSW_TXHDR_LEN);
141
142         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
143         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
144         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
145         mlxsw_tx_hdr_swid_set(txhdr, 0);
146         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
147         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
148         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
149 }
150
151 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
152 {
153         char spad_pl[MLXSW_REG_SPAD_LEN];
154         int err;
155
156         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
157         if (err)
158                 return err;
159         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
160         return 0;
161 }
162
163 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
164                                           bool is_up)
165 {
166         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
167         char paos_pl[MLXSW_REG_PAOS_LEN];
168
169         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
170                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
171                             MLXSW_PORT_ADMIN_STATUS_DOWN);
172         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
173 }
174
175 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
176                                          bool *p_is_up)
177 {
178         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
179         char paos_pl[MLXSW_REG_PAOS_LEN];
180         u8 oper_status;
181         int err;
182
183         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
184         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
185         if (err)
186                 return err;
187         oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
188         *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
189         return 0;
190 }
191
192 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
193                                       unsigned char *addr)
194 {
195         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
196         char ppad_pl[MLXSW_REG_PPAD_LEN];
197
198         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
199         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
200         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
201 }
202
/* Derive the port's MAC address from the switch base MAC by adding the
 * local port number to the last byte, store it in the netdev and program
 * it to the device.
 * NOTE(review): the addition can wrap the last byte for large port
 * numbers if the base MAC ends near 0xff — confirm the base MAC
 * allocation leaves enough room.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
212
213 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
214                                        u16 vid, enum mlxsw_reg_spms_state state)
215 {
216         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
217         char *spms_pl;
218         int err;
219
220         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
221         if (!spms_pl)
222                 return -ENOMEM;
223         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
224         mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
225         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
226         kfree(spms_pl);
227         return err;
228 }
229
230 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
231 {
232         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
233         char pmtu_pl[MLXSW_REG_PMTU_LEN];
234         int max_mtu;
235         int err;
236
237         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
238         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
239         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
240         if (err)
241                 return err;
242         max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
243
244         if (mtu > max_mtu)
245                 return -EINVAL;
246
247         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
248         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
249 }
250
251 static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
252                                     u8 swid)
253 {
254         char pspa_pl[MLXSW_REG_PSPA_LEN];
255
256         mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
257         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
258 }
259
260 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
261 {
262         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
263
264         return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
265                                         swid);
266 }
267
268 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
269                                      bool enable)
270 {
271         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
272         char svpe_pl[MLXSW_REG_SVPE_LEN];
273
274         mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
275         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
276 }
277
278 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
279                                  enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
280                                  u16 vid)
281 {
282         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
283         char svfa_pl[MLXSW_REG_SVFA_LEN];
284
285         mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
286                             fid, vid);
287         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
288 }
289
290 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
291                                           u16 vid, bool learn_enable)
292 {
293         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
294         char *spvmlr_pl;
295         int err;
296
297         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
298         if (!spvmlr_pl)
299                 return -ENOMEM;
300         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
301                               learn_enable);
302         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
303         kfree(spvmlr_pl);
304         return err;
305 }
306
307 static int
308 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
309 {
310         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
311         char sspr_pl[MLXSW_REG_SSPR_LEN];
312
313         mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
314         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
315 }
316
317 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
318                                          u8 local_port, u8 *p_module,
319                                          u8 *p_width, u8 *p_lane)
320 {
321         char pmlp_pl[MLXSW_REG_PMLP_LEN];
322         int err;
323
324         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
325         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
326         if (err)
327                 return err;
328         *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
329         *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
330         *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
331         return 0;
332 }
333
334 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
335                                     u8 module, u8 width, u8 lane)
336 {
337         char pmlp_pl[MLXSW_REG_PMLP_LEN];
338         int i;
339
340         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
341         mlxsw_reg_pmlp_width_set(pmlp_pl, width);
342         for (i = 0; i < width; i++) {
343                 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
344                 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
345         }
346
347         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
348 }
349
350 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
351 {
352         char pmlp_pl[MLXSW_REG_PMLP_LEN];
353
354         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
355         mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
356         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
357 }
358
359 static int mlxsw_sp_port_open(struct net_device *dev)
360 {
361         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
362         int err;
363
364         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
365         if (err)
366                 return err;
367         netif_start_queue(dev);
368         return 0;
369 }
370
/* .ndo_stop: quiesce the Tx queue, then take the port administratively
 * down.
 */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
378
379 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
380                                       struct net_device *dev)
381 {
382         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
383         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
384         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
385         const struct mlxsw_tx_info tx_info = {
386                 .local_port = mlxsw_sp_port->local_port,
387                 .is_emad = false,
388         };
389         u64 len;
390         int err;
391
392         if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
393                 return NETDEV_TX_BUSY;
394
395         if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
396                 struct sk_buff *skb_orig = skb;
397
398                 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
399                 if (!skb) {
400                         this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
401                         dev_kfree_skb_any(skb_orig);
402                         return NETDEV_TX_OK;
403                 }
404         }
405
406         if (eth_skb_pad(skb)) {
407                 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
408                 return NETDEV_TX_OK;
409         }
410
411         mlxsw_sp_txhdr_construct(skb, &tx_info);
412         /* TX header is consumed by HW on the way so we shouldn't count its
413          * bytes as being sent.
414          */
415         len = skb->len - MLXSW_TXHDR_LEN;
416
417         /* Due to a race we might fail here because of a full queue. In that
418          * unlikely case we simply drop the packet.
419          */
420         err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
421
422         if (!err) {
423                 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
424                 u64_stats_update_begin(&pcpu_stats->syncp);
425                 pcpu_stats->tx_packets++;
426                 pcpu_stats->tx_bytes += len;
427                 u64_stats_update_end(&pcpu_stats->syncp);
428         } else {
429                 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
430                 dev_kfree_skb_any(skb);
431         }
432         return NETDEV_TX_OK;
433 }
434
/* .ndo_set_rx_mode: intentionally empty — no action is needed here.
 * NOTE(review): presumably address filtering is handled through the
 * switchdev/FDB path; confirm before extending this stub.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
438
439 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
440 {
441         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
442         struct sockaddr *addr = p;
443         int err;
444
445         if (!is_valid_ether_addr(addr->sa_data))
446                 return -EADDRNOTAVAIL;
447
448         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
449         if (err)
450                 return err;
451         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
452         return 0;
453 }
454
455 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
456                                  bool pause_en, bool pfc_en, u16 delay)
457 {
458         u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
459
460         delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
461                          MLXSW_SP_PAUSE_DELAY;
462
463         if (pause_en || pfc_en)
464                 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
465                                                     pg_size + delay, pg_size);
466         else
467                 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
468 }
469
/* Configure the port's priority-group (PG) headroom buffers (PBMC).
 *
 * Each PG that has at least one priority mapped to it through 'prio_tc'
 * is (re)sized for 'mtu'; its lossless/lossy nature follows 'pause_en'
 * and the PFC bit of the first priority found mapped to it. PGs with no
 * priority mapped keep their currently-queried configuration.
 *
 * 'my_pfc' may be NULL, in which case PFC is treated as disabled and
 * the delay as 0. Returns 0 or a negative errno.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	/* Read the current configuration so untouched PGs keep theirs. */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		/* Find the first priority mapped to PG 'i'; its PFC bit
		 * decides whether the PG is lossless.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
504
505 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
506                                       int mtu, bool pause_en)
507 {
508         u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
509         bool dcb_en = !!mlxsw_sp_port->dcb.ets;
510         struct ieee_pfc *my_pfc;
511         u8 *prio_tc;
512
513         prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
514         my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
515
516         return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
517                                             pause_en, my_pfc);
518 }
519
/* .ndo_change_mtu: resize the headroom buffers for the new MTU first,
 * then commit the MTU to the device. If committing fails, restore the
 * headroom for the current (unchanged) dev->mtu.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	/* Roll back the headroom to match the MTU still in effect. */
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
539
/* .ndo_get_stats64: sum the per-CPU Tx/Rx counters into 'stats'.
 * The 64-bit counters are read under the u64_stats seqcount retry loop
 * so a concurrent writer cannot be observed mid-update.
 */
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		/* Retry if the writer touched the counters while reading. */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
571
572 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
573                            u16 vid_end, bool is_member, bool untagged)
574 {
575         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
576         char *spvm_pl;
577         int err;
578
579         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
580         if (!spvm_pl)
581                 return -ENOMEM;
582
583         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
584                             vid_end, is_member, untagged);
585         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
586         kfree(spvm_pl);
587         return err;
588 }
589
/* Transition the port to Virtual mode: install an explicit {Port, VID}
 * to FID mapping for every active VLAN, then enable virtual-port mode.
 * On failure the mappings installed so far are rolled back.
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			/* Unmap only VIDs below the one that failed. */
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		/* All mappings were installed; unmap the full VID range. */
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
619
620 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
621 {
622         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
623         u16 vid;
624         int err;
625
626         err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
627         if (err)
628                 return err;
629
630         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
631                 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
632                                                    vid, vid);
633                 if (err)
634                         return err;
635         }
636
637         return 0;
638 }
639
640 static struct mlxsw_sp_fid *
641 mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
642 {
643         struct mlxsw_sp_fid *f;
644
645         list_for_each_entry(f, &mlxsw_sp->port_vfids.list, list) {
646                 if (f->vid == vid)
647                         return f;
648         }
649
650         return NULL;
651 }
652
/* Return the index of the first free vFID, or MLXSW_SP_VFID_PORT_MAX if
 * all vFIDs are in use (find_first_zero_bit() semantics).
 */
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}
658
/* Create or destroy FID 'fid' in the device via the SFMR register.
 * NOTE(review): the first packed argument is '!create' — presumably the
 * SFMR op encoding where 0 means create; confirm against reg.h.
 */
static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
666
667 static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
668
/* Allocate a free vFID, create the corresponding FID in the device, and
 * track it on the global port_vfids list.
 * Returns the new tracking structure or an ERR_PTR(): -ERANGE when all
 * vFIDs are taken, -ENOMEM on allocation failure (after undoing the
 * device-side FID creation), or the error from the register write.
 */
static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	/* mlxsw_sp_avail_vfid_get() returns MLXSW_SP_VFID_PORT_MAX when
	 * the bitmap is full.
	 */
	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->vid = vid;

	list_add(&f->list, &mlxsw_sp->port_vfids.list);
	set_bit(vfid, mlxsw_sp->port_vfids.mapped);

	return f;

err_allocate_vfid:
	/* Undo the device-side FID creation. */
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}
707
708 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
709                                   struct mlxsw_sp_fid *f)
710 {
711         u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
712
713         clear_bit(vfid, mlxsw_sp->port_vfids.mapped);
714         list_del(&f->list);
715
716         mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false);
717
718         kfree(f);
719 }
720
721 static struct mlxsw_sp_port *
722 mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
723 {
724         struct mlxsw_sp_port *mlxsw_sp_vport;
725
726         mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
727         if (!mlxsw_sp_vport)
728                 return NULL;
729
730         /* dev will be set correctly after the VLAN device is linked
731          * with the real device. In case of bridge SELF invocation, dev
732          * will remain as is.
733          */
734         mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
735         mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
736         mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
737         mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
738         mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
739         mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
740         mlxsw_sp_vport->vport.vid = vid;
741
742         list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
743
744         return mlxsw_sp_vport;
745 }
746
/* Unlink a vPort from its parent port's list and free it. */
static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}
752
753 static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
754                                   bool valid)
755 {
756         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
757         u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
758
759         return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
760                                             vid);
761 }
762
/* Attach a vPort to the vFID matching its VID, creating the vFID and
 * enabling flooding on first use. 'ref_count' tracks how many vPorts
 * share the FID; error paths must only tear down what this call would
 * have been the first user of.
 */
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, vid);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, vid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	/* The first user of the FID sets up flooding for it. */
	if (!f->ref_count) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
		if (err)
			goto err_vport_flood_set;
	}

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	return 0;

err_vport_fid_map:
	/* Only tear down flooding and the vFID if no other vPort uses it. */
	if (!f->ref_count)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}
799
/* Detach a vPort from its FID; when the last user leaves, disable
 * flooding and destroy the vFID. Counterpart of
 * mlxsw_sp_vport_vfid_join().
 */
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	/* Grab the FID pointer before clearing it on the vPort. */
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	if (--f->ref_count == 0) {
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	}
}
813
/* ndo_vlan_rx_add_vid handler. Creates a vPort for @vid on the port and
 * configures it: joins a vFID, disables learning for the VID, adds VLAN
 * membership and sets the STP state to forwarding.
 * Returns 0 on success (including for VID 0 and an already configured
 * VID) or a negative errno, with all partial configuration rolled back.
 */
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	/* Only VID 1 egresses untagged. */
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		return -ENOMEM;
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		goto err_vport_vfid_join;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	return 0;

	/* Error rollback: undo in reverse order of configuration. */
err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
err_vport_vfid_join:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}
892
/* ndo_vlan_rx_kill_vid handler. Tears down the vPort associated with
 * @vid in reverse order of mlxsw_sp_port_add_vid(): STP state set to
 * discarding, VLAN membership removed, learning re-enabled, the FID
 * reference dropped and finally the vPort destroyed.
 * Returns 0 on success (including for VID 0 and a non-existent VID) or
 * a negative errno; on failure the vPort is left partially torn down.
 */
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}
956
957 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
958                                             size_t len)
959 {
960         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
961         u8 module = mlxsw_sp_port->mapping.module;
962         u8 width = mlxsw_sp_port->mapping.width;
963         u8 lane = mlxsw_sp_port->mapping.lane;
964         int err;
965
966         if (!mlxsw_sp_port->split)
967                 err = snprintf(name, len, "p%d", module + 1);
968         else
969                 err = snprintf(name, len, "p%ds%d", module + 1,
970                                lane / width);
971
972         if (err >= len)
973                 return -EINVAL;
974
975         return 0;
976 }
977
/* Netdev operations for Spectrum port netdevices. FDB and bridge
 * operations are delegated to the generic switchdev helpers.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
996
/* ethtool .get_drvinfo: report driver name and version, the firmware
 * revision as "major.minor.subminor" and the underlying bus device name.
 */
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
1014
1015 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1016                                          struct ethtool_pauseparam *pause)
1017 {
1018         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1019
1020         pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1021         pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1022 }
1023
/* Program the port's global PAUSE configuration (Rx/Tx enable bits) into
 * the PFCC register. Returns the register write result.
 */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}
1036
/* ethtool .set_pauseparam: validate the request (rejected while PFC is
 * enabled, and PAUSE autonegotiation is unsupported), resize the port's
 * headroom for the new configuration, program the PAUSE settings and
 * cache them. On a programming failure the headroom is restored from the
 * currently cached configuration.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	/* Headroom must be sized for the new setting before enabling it. */
	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
1076
/* Descriptor for a single ethtool HW counter: the string exposed to
 * userspace and a getter extracting the value from a PPCNT payload.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};
1081
/* IEEE 802.3 counters exposed through "ethtool -S", each read from a
 * PPCNT register payload by its getter.
 */
static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};
1160
1161 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1162
1163 static void mlxsw_sp_port_get_strings(struct net_device *dev,
1164                                       u32 stringset, u8 *data)
1165 {
1166         u8 *p = data;
1167         int i;
1168
1169         switch (stringset) {
1170         case ETH_SS_STATS:
1171                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1172                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1173                                ETH_GSTRING_LEN);
1174                         p += ETH_GSTRING_LEN;
1175                 }
1176                 break;
1177         }
1178 }
1179
1180 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1181                                      enum ethtool_phys_id_state state)
1182 {
1183         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1184         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1185         char mlcr_pl[MLXSW_REG_MLCR_LEN];
1186         bool active;
1187
1188         switch (state) {
1189         case ETHTOOL_ID_ACTIVE:
1190                 active = true;
1191                 break;
1192         case ETHTOOL_ID_INACTIVE:
1193                 active = false;
1194                 break;
1195         default:
1196                 return -EOPNOTSUPP;
1197         }
1198
1199         mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1200         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1201 }
1202
/* ethtool .get_ethtool_stats: query the IEEE 802.3 counter group from
 * the PPCNT register and extract each counter via its getter. If the
 * register query fails, every counter is reported as zero.
 */
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
}
1218
1219 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1220 {
1221         switch (sset) {
1222         case ETH_SS_STATS:
1223                 return MLXSW_SP_PORT_HW_STATS_LEN;
1224         default:
1225                 return -EOPNOTSUPP;
1226         }
1227 }
1228
/* Translation entry between PTYS register protocol bits (mask) and the
 * matching ethtool SUPPORTED_ / ADVERTISED_ flags and speed (in Mb/s,
 * as used by ethtool).
 */
struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};
1235
/* PTYS protocol bit to ethtool link mode translation table. Entries
 * without supported/advertised flags have no corresponding ethtool
 * mode and contribute only their speed.
 */
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};
1332
1333 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1334
/* Derive the ethtool port-type capability bit from the PTYS capability
 * mask: SR/CR/SGMII protocols map to SUPPORTED_FIBRE, KR/KX backplane
 * protocols to SUPPORTED_Backplane; 0 if neither family is present.
 */
static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}
1353
1354 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1355 {
1356         u32 modes = 0;
1357         int i;
1358
1359         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1360                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1361                         modes |= mlxsw_sp_port_link_mode[i].supported;
1362         }
1363         return modes;
1364 }
1365
1366 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1367 {
1368         u32 modes = 0;
1369         int i;
1370
1371         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1372                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1373                         modes |= mlxsw_sp_port_link_mode[i].advertised;
1374         }
1375         return modes;
1376 }
1377
1378 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1379                                             struct ethtool_cmd *cmd)
1380 {
1381         u32 speed = SPEED_UNKNOWN;
1382         u8 duplex = DUPLEX_UNKNOWN;
1383         int i;
1384
1385         if (!carrier_ok)
1386                 goto out;
1387
1388         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1389                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1390                         speed = mlxsw_sp_port_link_mode[i].speed;
1391                         duplex = DUPLEX_FULL;
1392                         break;
1393                 }
1394         }
1395 out:
1396         ethtool_cmd_speed_set(cmd, speed);
1397         cmd->duplex = duplex;
1398 }
1399
/* Derive the ethtool connector type (PORT_*) from the PTYS protocol
 * mask: SR/SGMII modes map to fibre, CR modes to direct attach copper,
 * KR/KX backplane modes to none, and anything else to other.
 */
static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}
1421
/* ethtool .get_settings: query the PTYS register and translate the
 * capability, admin and operational protocol masks into ethtool
 * supported/advertising/lp_advertising flags, speed, duplex and
 * connector type. When no operational protocol is reported (e.g. link
 * down) the capability mask is used for connector and lp_advertising
 * instead.
 */
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	/* Fall back to the capability mask when nothing is negotiated. */
	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
1456
1457 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1458 {
1459         u32 ptys_proto = 0;
1460         int i;
1461
1462         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1463                 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1464                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1465         }
1466         return ptys_proto;
1467 }
1468
1469 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1470 {
1471         u32 ptys_proto = 0;
1472         int i;
1473
1474         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1475                 if (speed == mlxsw_sp_port_link_mode[i].speed)
1476                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1477         }
1478         return ptys_proto;
1479 }
1480
1481 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
1482 {
1483         u32 ptys_proto = 0;
1484         int i;
1485
1486         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1487                 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
1488                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1489         }
1490         return ptys_proto;
1491 }
1492
/* ethtool .set_settings: compute the new admin protocol mask — from the
 * advertised link modes when autoneg is enabled, otherwise from the
 * forced speed — validate it against the port's capabilities and write
 * it to the PTYS register. If the port is operationally up it is then
 * toggled (admin down, admin up) so the new setting takes effect.
 */
static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	/* Only protocols the port is capable of may be requested. */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	/* Nothing to do when the requested mask is already configured. */
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}
1557
/* ethtool operations for Spectrum port netdevices. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
1570
/* Limit the port's admin protocol mask according to its lane width:
 * allow every link mode whose speed does not exceed the base speed
 * times @width, and write the resulting mask to the PTYS register.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
			    eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
1584
/* Configure an ETS element in the port's QoS hierarchy via the QEEC
 * register: link element @index at level @hr to @next_index and set
 * whether it uses DWRR arbitration and with which weight.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1599
/* Set the maximum shaper rate of an ETS element (@index at level @hr,
 * linked to @next_index) via the QEEC register.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1613
1614 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
1615                               u8 switch_prio, u8 tclass)
1616 {
1617         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1618         char qtct_pl[MLXSW_REG_QTCT_LEN];
1619
1620         mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
1621                             tclass);
1622         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
1623 }
1624
/* Set up the default ETS configuration for a new port: every TC feeds its
 * own subgroup, all subgroups feed a single group, all max shapers are
 * disabled and every switch priority is mapped to traffic class 0.
 *
 * Returns 0 on success or the error of the first failing register write.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1686
1687 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1688                                 bool split, u8 module, u8 width, u8 lane)
1689 {
1690         struct mlxsw_sp_port *mlxsw_sp_port;
1691         struct net_device *dev;
1692         size_t bytes;
1693         int err;
1694
1695         dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
1696         if (!dev)
1697                 return -ENOMEM;
1698         mlxsw_sp_port = netdev_priv(dev);
1699         mlxsw_sp_port->dev = dev;
1700         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1701         mlxsw_sp_port->local_port = local_port;
1702         mlxsw_sp_port->split = split;
1703         mlxsw_sp_port->mapping.module = module;
1704         mlxsw_sp_port->mapping.width = width;
1705         mlxsw_sp_port->mapping.lane = lane;
1706         bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
1707         mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
1708         if (!mlxsw_sp_port->active_vlans) {
1709                 err = -ENOMEM;
1710                 goto err_port_active_vlans_alloc;
1711         }
1712         mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
1713         if (!mlxsw_sp_port->untagged_vlans) {
1714                 err = -ENOMEM;
1715                 goto err_port_untagged_vlans_alloc;
1716         }
1717         INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
1718
1719         mlxsw_sp_port->pcpu_stats =
1720                 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
1721         if (!mlxsw_sp_port->pcpu_stats) {
1722                 err = -ENOMEM;
1723                 goto err_alloc_stats;
1724         }
1725
1726         dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
1727         dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
1728
1729         err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
1730         if (err) {
1731                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
1732                         mlxsw_sp_port->local_port);
1733                 goto err_dev_addr_init;
1734         }
1735
1736         netif_carrier_off(dev);
1737
1738         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
1739                          NETIF_F_HW_VLAN_CTAG_FILTER;
1740
1741         /* Each packet needs to have a Tx header (metadata) on top all other
1742          * headers.
1743          */
1744         dev->hard_header_len += MLXSW_TXHDR_LEN;
1745
1746         err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1747         if (err) {
1748                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1749                         mlxsw_sp_port->local_port);
1750                 goto err_port_system_port_mapping_set;
1751         }
1752
1753         err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1754         if (err) {
1755                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1756                         mlxsw_sp_port->local_port);
1757                 goto err_port_swid_set;
1758         }
1759
1760         err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
1761         if (err) {
1762                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1763                         mlxsw_sp_port->local_port);
1764                 goto err_port_speed_by_width_set;
1765         }
1766
1767         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1768         if (err) {
1769                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1770                         mlxsw_sp_port->local_port);
1771                 goto err_port_mtu_set;
1772         }
1773
1774         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1775         if (err)
1776                 goto err_port_admin_status_set;
1777
1778         err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1779         if (err) {
1780                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1781                         mlxsw_sp_port->local_port);
1782                 goto err_port_buffers_init;
1783         }
1784
1785         err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
1786         if (err) {
1787                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
1788                         mlxsw_sp_port->local_port);
1789                 goto err_port_ets_init;
1790         }
1791
1792         /* ETS and buffers must be initialized before DCB. */
1793         err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
1794         if (err) {
1795                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
1796                         mlxsw_sp_port->local_port);
1797                 goto err_port_dcb_init;
1798         }
1799
1800         mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1801         err = register_netdev(dev);
1802         if (err) {
1803                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1804                         mlxsw_sp_port->local_port);
1805                 goto err_register_netdev;
1806         }
1807
1808         err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
1809                                    mlxsw_sp_port->local_port, dev,
1810                                    mlxsw_sp_port->split, module);
1811         if (err) {
1812                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
1813                         mlxsw_sp_port->local_port);
1814                 goto err_core_port_init;
1815         }
1816
1817         err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1818         if (err)
1819                 goto err_port_vlan_init;
1820
1821         mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1822         return 0;
1823
1824 err_port_vlan_init:
1825         mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1826 err_core_port_init:
1827         unregister_netdev(dev);
1828 err_register_netdev:
1829 err_port_dcb_init:
1830 err_port_ets_init:
1831 err_port_buffers_init:
1832 err_port_admin_status_set:
1833 err_port_mtu_set:
1834 err_port_speed_by_width_set:
1835 err_port_swid_set:
1836 err_port_system_port_mapping_set:
1837 err_dev_addr_init:
1838         free_percpu(mlxsw_sp_port->pcpu_stats);
1839 err_alloc_stats:
1840         kfree(mlxsw_sp_port->untagged_vlans);
1841 err_port_untagged_vlans_alloc:
1842         kfree(mlxsw_sp_port->active_vlans);
1843 err_port_active_vlans_alloc:
1844         free_netdev(dev);
1845         return err;
1846 }
1847
/* Tear down a port created by mlxsw_sp_port_create(), undoing its steps in
 * reverse order. A NULL entry means the port was never created or was
 * already removed (e.g. by the split/unsplit code) and is silently skipped.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	/* Clear the entry first so nothing else can look the port up. */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	/* NOTE(review): presumably drops the default VID 1 — confirm against
	 * mlxsw_sp_port_kill_vid().
	 */
	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	/* All vPorts are expected to be gone by this point. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
	free_netdev(mlxsw_sp_port->dev);
}
1868
1869 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1870 {
1871         int i;
1872
1873         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1874                 mlxsw_sp_port_remove(mlxsw_sp, i);
1875         kfree(mlxsw_sp->ports);
1876 }
1877
1878 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1879 {
1880         u8 module, width, lane;
1881         size_t alloc_size;
1882         int i;
1883         int err;
1884
1885         alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1886         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1887         if (!mlxsw_sp->ports)
1888                 return -ENOMEM;
1889
1890         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1891                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
1892                                                     &width, &lane);
1893                 if (err)
1894                         goto err_port_module_info_get;
1895                 if (!width)
1896                         continue;
1897                 mlxsw_sp->port_to_module[i] = module;
1898                 err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
1899                                            lane);
1900                 if (err)
1901                         goto err_port_create;
1902         }
1903         return 0;
1904
1905 err_port_create:
1906 err_port_module_info_get:
1907         for (i--; i >= 1; i--)
1908                 mlxsw_sp_port_remove(mlxsw_sp, i);
1909         kfree(mlxsw_sp->ports);
1910         return err;
1911 }
1912
1913 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1914 {
1915         u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1916
1917         return local_port - offset;
1918 }
1919
/* Create @count split ports starting at @base_port, all sharing @module.
 * Each split port gets an equal share of the module's lanes.
 *
 * The three stages (module mapping, SWID assignment, port creation) are
 * each applied to all ports before the next stage starts. On failure, the
 * completed stages are unwound in reverse for every port; the "i = count"
 * assignments re-arm the index because a stage that was not the failing
 * one completed for all @count ports.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count; /* the SWID stage completed for all ports */
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count; /* the module-map stage completed for all ports */
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}
1962
1963 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
1964                                          u8 base_port, unsigned int count)
1965 {
1966         u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
1967         int i;
1968
1969         /* Split by four means we need to re-create two ports, otherwise
1970          * only one.
1971          */
1972         count = count / 2;
1973
1974         for (i = 0; i < count; i++) {
1975                 local_port = base_port + i * 2;
1976                 module = mlxsw_sp->port_to_module[local_port];
1977
1978                 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
1979                                          0);
1980         }
1981
1982         for (i = 0; i < count; i++)
1983                 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
1984
1985         for (i = 0; i < count; i++) {
1986                 local_port = base_port + i * 2;
1987                 module = mlxsw_sp->port_to_module[local_port];
1988
1989                 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
1990                                      width, 0);
1991         }
1992 }
1993
1994 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
1995                                unsigned int count)
1996 {
1997         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1998         struct mlxsw_sp_port *mlxsw_sp_port;
1999         u8 module, cur_width, base_port;
2000         int i;
2001         int err;
2002
2003         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2004         if (!mlxsw_sp_port) {
2005                 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2006                         local_port);
2007                 return -EINVAL;
2008         }
2009
2010         module = mlxsw_sp_port->mapping.module;
2011         cur_width = mlxsw_sp_port->mapping.width;
2012
2013         if (count != 2 && count != 4) {
2014                 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
2015                 return -EINVAL;
2016         }
2017
2018         if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
2019                 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
2020                 return -EINVAL;
2021         }
2022
2023         /* Make sure we have enough slave (even) ports for the split. */
2024         if (count == 2) {
2025                 base_port = local_port;
2026                 if (mlxsw_sp->ports[base_port + 1]) {
2027                         netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2028                         return -EINVAL;
2029                 }
2030         } else {
2031                 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2032                 if (mlxsw_sp->ports[base_port + 1] ||
2033                     mlxsw_sp->ports[base_port + 3]) {
2034                         netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2035                         return -EINVAL;
2036                 }
2037         }
2038
2039         for (i = 0; i < count; i++)
2040                 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2041
2042         err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
2043         if (err) {
2044                 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2045                 goto err_port_split_create;
2046         }
2047
2048         return 0;
2049
2050 err_port_split_create:
2051         mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2052         return err;
2053 }
2054
2055 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
2056 {
2057         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2058         struct mlxsw_sp_port *mlxsw_sp_port;
2059         u8 cur_width, base_port;
2060         unsigned int count;
2061         int i;
2062
2063         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2064         if (!mlxsw_sp_port) {
2065                 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2066                         local_port);
2067                 return -EINVAL;
2068         }
2069
2070         if (!mlxsw_sp_port->split) {
2071                 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
2072                 return -EINVAL;
2073         }
2074
2075         cur_width = mlxsw_sp_port->mapping.width;
2076         count = cur_width == 1 ? 4 : 2;
2077
2078         base_port = mlxsw_sp_cluster_base_port_get(local_port);
2079
2080         /* Determine which ports to remove. */
2081         if (count == 2 && local_port >= base_port + 2)
2082                 base_port = base_port + 2;
2083
2084         for (i = 0; i < count; i++)
2085                 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2086
2087         mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2088
2089         return 0;
2090 }
2091
2092 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2093                                      char *pude_pl, void *priv)
2094 {
2095         struct mlxsw_sp *mlxsw_sp = priv;
2096         struct mlxsw_sp_port *mlxsw_sp_port;
2097         enum mlxsw_reg_pude_oper_status status;
2098         u8 local_port;
2099
2100         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2101         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2102         if (!mlxsw_sp_port)
2103                 return;
2104
2105         status = mlxsw_reg_pude_oper_status_get(pude_pl);
2106         if (status == MLXSW_PORT_OPER_STATUS_UP) {
2107                 netdev_info(mlxsw_sp_port->dev, "link up\n");
2108                 netif_carrier_on(mlxsw_sp_port->dev);
2109         } else {
2110                 netdev_info(mlxsw_sp_port->dev, "link down\n");
2111                 netif_carrier_off(mlxsw_sp_port->dev);
2112         }
2113 }
2114
/* Listener binding the PUDE trap to the port up/down handler above. */
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
2119
2120 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
2121                                    enum mlxsw_event_trap_id trap_id)
2122 {
2123         struct mlxsw_event_listener *el;
2124         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2125         int err;
2126
2127         switch (trap_id) {
2128         case MLXSW_TRAP_ID_PUDE:
2129                 el = &mlxsw_sp_pude_event;
2130                 break;
2131         }
2132         err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
2133         if (err)
2134                 return err;
2135
2136         mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
2137         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2138         if (err)
2139                 goto err_event_trap_set;
2140
2141         return 0;
2142
2143 err_event_trap_set:
2144         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2145         return err;
2146 }
2147
2148 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
2149                                       enum mlxsw_event_trap_id trap_id)
2150 {
2151         struct mlxsw_event_listener *el;
2152
2153         switch (trap_id) {
2154         case MLXSW_TRAP_ID_PUDE:
2155                 el = &mlxsw_sp_pude_event;
2156                 break;
2157         }
2158         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2159 }
2160
2161 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
2162                                       void *priv)
2163 {
2164         struct mlxsw_sp *mlxsw_sp = priv;
2165         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2166         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2167
2168         if (unlikely(!mlxsw_sp_port)) {
2169                 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2170                                      local_port);
2171                 return;
2172         }
2173
2174         skb->dev = mlxsw_sp_port->dev;
2175
2176         pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2177         u64_stats_update_begin(&pcpu_stats->syncp);
2178         pcpu_stats->rx_packets++;
2179         pcpu_stats->rx_bytes += skb->len;
2180         u64_stats_update_end(&pcpu_stats->syncp);
2181
2182         skb->protocol = eth_type_trans(skb, skb->dev);
2183         netif_receive_skb(skb);
2184 }
2185
/* Control-plane packet types trapped to the CPU from any local port;
 * they all share the generic RX handler above.
 */
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};
2259
/* Configure the RX and control trap groups, then register a listener for
 * every entry of mlxsw_sp_rx_listener[] and set its trap policy to
 * TRAP_TO_CPU. On failure, already-configured entries are reverted to
 * DISCARD and their listeners unregistered.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	/* Entry i was registered but its trap policy write failed; drop its
	 * listener before unwinding the fully-configured entries below.
	 */
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
2308
2309 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2310 {
2311         char hpkt_pl[MLXSW_REG_HPKT_LEN];
2312         int i;
2313
2314         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2315                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
2316                                     mlxsw_sp_rx_listener[i].trap_id);
2317                 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2318
2319                 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2320                                                   &mlxsw_sp_rx_listener[i],
2321                                                   mlxsw_sp);
2322         }
2323 }
2324
2325 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2326                                  enum mlxsw_reg_sfgc_type type,
2327                                  enum mlxsw_reg_sfgc_bridge_type bridge_type)
2328 {
2329         enum mlxsw_flood_table_type table_type;
2330         enum mlxsw_sp_flood_table flood_table;
2331         char sfgc_pl[MLXSW_REG_SFGC_LEN];
2332
2333         if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2334                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2335         else
2336                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2337
2338         if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2339                 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2340         else
2341                 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2342
2343         mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2344                             flood_table);
2345         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2346 }
2347
2348 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2349 {
2350         int type, err;
2351
2352         for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2353                 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2354                         continue;
2355
2356                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2357                                             MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2358                 if (err)
2359                         return err;
2360
2361                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2362                                             MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2363                 if (err)
2364                         return err;
2365         }
2366
2367         return 0;
2368 }
2369
/* Program the LAG hash configuration (SLCR register): hash over L2
 * addresses, EtherType, VLAN ID, IP addresses and L4 ports/protocol.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}
2385
/* Top-level init callback invoked by the mlxsw core once the bus is up.
 * Sub-modules are brought up in dependency order; on failure they are
 * unwound in reverse. Note that flood-table and LAG configuration are
 * pure register writes with no corresponding teardown.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->fids);
	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* Ports come last so that all infrastructure they rely on is ready. */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	/* LAG init has no teardown; fall through to buffers. */
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	/* Flood init has no teardown; fall through to traps. */
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	return err;
}
2469
/* Teardown callback; releases sub-modules in reverse of mlxsw_sp_init()
 * and sanity-checks that no FIDs or router interfaces leaked.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int i;

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	/* All FIDs and RIFs must have been released by the steps above. */
	WARN_ON(!list_empty(&mlxsw_sp->fids));
	for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
		WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}
2485
/* Resource/configuration profile passed to the core at device init.
 * Each max_* value only takes effect when its used_* flag is set.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	/* Two FID-offset and two FID flood tables, controlled mode 3. */
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
2521
/* Spectrum driver registration: wires the callbacks above (init/fini,
 * port split, shared-buffer ops, TX header construction) into the
 * mlxsw core.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner				= THIS_MODULE,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};
2544
2545 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
2546                                          u16 fid)
2547 {
2548         if (mlxsw_sp_fid_is_vfid(fid))
2549                 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
2550         else
2551                 return test_bit(fid, lag_port->active_vlans);
2552 }
2553
2554 static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
2555                                            u16 fid)
2556 {
2557         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2558         u8 local_port = mlxsw_sp_port->local_port;
2559         u16 lag_id = mlxsw_sp_port->lag_id;
2560         int i, count = 0;
2561
2562         if (!mlxsw_sp_port->lagged)
2563                 return true;
2564
2565         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2566                 struct mlxsw_sp_port *lag_port;
2567
2568                 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
2569                 if (!lag_port || lag_port->local_port == local_port)
2570                         continue;
2571                 if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
2572                         count++;
2573         }
2574
2575         return !count;
2576 }
2577
2578 static int
2579 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2580                                     u16 fid)
2581 {
2582         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2583         char sfdf_pl[MLXSW_REG_SFDF_LEN];
2584
2585         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
2586         mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2587         mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
2588                                                 mlxsw_sp_port->local_port);
2589
2590         netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
2591                    mlxsw_sp_port->local_port, fid);
2592
2593         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2594 }
2595
2596 static int
2597 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2598                                       u16 fid)
2599 {
2600         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2601         char sfdf_pl[MLXSW_REG_SFDF_LEN];
2602
2603         mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
2604         mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2605         mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2606
2607         netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
2608                    mlxsw_sp_port->lag_id, fid);
2609
2610         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2611 }
2612
2613 int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
2614 {
2615         if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
2616                 return 0;
2617
2618         if (mlxsw_sp_port->lagged)
2619                 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
2620                                                              fid);
2621         else
2622                 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
2623 }
2624
/* Return true if @dev is a mlxsw_sp port netdev (identified by its ops). */
static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
2629
/* Only a single bridge device is supported per ASIC: allow @br_dev if
 * none is recorded yet, or if it is the one already in use.
 */
static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}
2636
/* Record @br_dev as the master bridge and take a reference on it. */
static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}
2643
/* Drop a master bridge reference; forget the device on the last one. */
static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0)
		mlxsw_sp->master_bridge.dev = NULL;
}
2649
/* Enslave the port to bridge @br_dev: drop the implicit PVID=1 VLAN
 * interface, reference the bridge and enable learning/flooding flags.
 */
static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}
2674
/* Release the port from its bridge: restore PVID 1, drop the bridge
 * reference, clear learning/flooding flags and re-create the implicit
 * VLAN interface for untagged traffic.
 */
static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}
2693
2694 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2695 {
2696         char sldr_pl[MLXSW_REG_SLDR_LEN];
2697
2698         mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
2699         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2700 }
2701
2702 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2703 {
2704         char sldr_pl[MLXSW_REG_SLDR_LEN];
2705
2706         mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
2707         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2708 }
2709
2710 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2711                                      u16 lag_id, u8 port_index)
2712 {
2713         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2714         char slcor_pl[MLXSW_REG_SLCOR_LEN];
2715
2716         mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
2717                                       lag_id, port_index);
2718         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2719 }
2720
2721 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2722                                         u16 lag_id)
2723 {
2724         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2725         char slcor_pl[MLXSW_REG_SLCOR_LEN];
2726
2727         mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
2728                                          lag_id);
2729         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2730 }
2731
2732 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
2733                                         u16 lag_id)
2734 {
2735         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2736         char slcor_pl[MLXSW_REG_SLCOR_LEN];
2737
2738         mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
2739                                         lag_id);
2740         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2741 }
2742
2743 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
2744                                          u16 lag_id)
2745 {
2746         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2747         char slcor_pl[MLXSW_REG_SLCOR_LEN];
2748
2749         mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
2750                                          lag_id);
2751         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2752 }
2753
2754 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2755                                   struct net_device *lag_dev,
2756                                   u16 *p_lag_id)
2757 {
2758         struct mlxsw_sp_upper *lag;
2759         int free_lag_id = -1;
2760         int i;
2761
2762         for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
2763                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
2764                 if (lag->ref_count) {
2765                         if (lag->dev == lag_dev) {
2766                                 *p_lag_id = i;
2767                                 return 0;
2768                         }
2769                 } else if (free_lag_id < 0) {
2770                         free_lag_id = i;
2771                 }
2772         }
2773         if (free_lag_id < 0)
2774                 return -EBUSY;
2775         *p_lag_id = free_lag_id;
2776         return 0;
2777 }
2778
2779 static bool
2780 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
2781                           struct net_device *lag_dev,
2782                           struct netdev_lag_upper_info *lag_upper_info)
2783 {
2784         u16 lag_id;
2785
2786         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
2787                 return false;
2788         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2789                 return false;
2790         return true;
2791 }
2792
2793 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2794                                        u16 lag_id, u8 *p_port_index)
2795 {
2796         int i;
2797
2798         for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2799                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
2800                         *p_port_index = i;
2801                         return 0;
2802                 }
2803         }
2804         return -EBUSY;
2805 }
2806
/* Propagate LAG membership to the port's PVID (VID 1) vPort. */
static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	/* The PVID vPort is expected to always exist. */
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* If vPort is assigned a RIF, then leave it since it's no
	 * longer valid.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lag_id = lag_id;
	mlxsw_sp_vport->lagged = 1;
}
2828
/* Undo LAG membership on the port's PVID (VID 1) vPort. */
static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	/* The PVID vPort is expected to always exist. */
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* A FID bound while lagged is no longer valid - leave it. */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lagged = 0;
}
2845
/* Join the port to the LAG represented by @lag_dev: create the device
 * LAG entry on first use, add the port to the collector and enable it,
 * then record the core mapping and bump the LAG reference count.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	/* First port in this LAG - create it in the device. */
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	/* ref_count is still zero if we created the LAG above - undo it. */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
2893
/* Remove the port from its LAG: disable and remove it from the
 * collector, detach from any bridge, destroy the LAG on last member
 * and clear the core mapping and vPort state.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* A port enslaved via the LAG leaves the bridge as well. */
	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	/* Last member - remove the LAG from the device. */
	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}
2924
2925 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2926                                       u16 lag_id)
2927 {
2928         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2929         char sldr_pl[MLXSW_REG_SLDR_LEN];
2930
2931         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
2932                                          mlxsw_sp_port->local_port);
2933         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2934 }
2935
2936 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2937                                          u16 lag_id)
2938 {
2939         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2940         char sldr_pl[MLXSW_REG_SLDR_LEN];
2941
2942         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
2943                                             mlxsw_sp_port->local_port);
2944         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2945 }
2946
2947 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
2948                                        bool lag_tx_enabled)
2949 {
2950         if (lag_tx_enabled)
2951                 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
2952                                                   mlxsw_sp_port->lag_id);
2953         else
2954                 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
2955                                                      mlxsw_sp_port->lag_id);
2956 }
2957
/* Reflect a bond member's tx_enabled state change into the device. */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
2963
2964 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
2965                                    struct net_device *vlan_dev)
2966 {
2967         struct mlxsw_sp_port *mlxsw_sp_vport;
2968         u16 vid = vlan_dev_vlan_id(vlan_dev);
2969
2970         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2971         if (WARN_ON(!mlxsw_sp_vport))
2972                 return -EINVAL;
2973
2974         mlxsw_sp_vport->dev = vlan_dev;
2975
2976         return 0;
2977 }
2978
2979 static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
2980                                       struct net_device *vlan_dev)
2981 {
2982         struct mlxsw_sp_port *mlxsw_sp_vport;
2983         u16 vid = vlan_dev_vlan_id(vlan_dev);
2984
2985         mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2986         if (WARN_ON(!mlxsw_sp_vport))
2987                 return;
2988
2989         mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
2990 }
2991
/* Handle CHANGEUPPER notifications for a physical port: vet the upper
 * device in PRECHANGEUPPER (only VLAN, bridge or hash-mode LAG uppers
 * are offloadable) and apply the link/unlink in CHANGEUPPER.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG and bridge uppers are supported. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		/* A port with VLAN uppers cannot be enslaved to a LAG. */
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		/* VLAN uppers of a LAG member must sit on the LAG itself. */
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							   upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else {
			/* PRECHANGEUPPER should have rejected anything else. */
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
3059
3060 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
3061                                                unsigned long event, void *ptr)
3062 {
3063         struct netdev_notifier_changelowerstate_info *info;
3064         struct mlxsw_sp_port *mlxsw_sp_port;
3065         int err;
3066
3067         mlxsw_sp_port = netdev_priv(dev);
3068         info = ptr;
3069
3070         switch (event) {
3071         case NETDEV_CHANGELOWERSTATE:
3072                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
3073                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
3074                                                         info->lower_state_info);
3075                         if (err)
3076                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
3077                 }
3078                 break;
3079         }
3080
3081         return 0;
3082 }
3083
3084 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
3085                                          unsigned long event, void *ptr)
3086 {
3087         switch (event) {
3088         case NETDEV_PRECHANGEUPPER:
3089         case NETDEV_CHANGEUPPER:
3090                 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
3091         case NETDEV_CHANGELOWERSTATE:
3092                 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
3093         }
3094
3095         return 0;
3096 }
3097
3098 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3099                                         unsigned long event, void *ptr)
3100 {
3101         struct net_device *dev;
3102         struct list_head *iter;
3103         int ret;
3104
3105         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3106                 if (mlxsw_sp_port_dev_check(dev)) {
3107                         ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
3108                         if (ret)
3109                                 return ret;
3110                 }
3111         }
3112
3113         return 0;
3114 }
3115
3116 static struct mlxsw_sp_fid *
3117 mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
3118                       const struct net_device *br_dev)
3119 {
3120         struct mlxsw_sp_fid *f;
3121
3122         list_for_each_entry(f, &mlxsw_sp->br_vfids.list, list) {
3123                 if (f->dev == br_dev)
3124                         return f;
3125         }
3126
3127         return NULL;
3128 }
3129
/* Bridge vFIDs are allocated after the per-port vFID range; convert a
 * global vFID to its index within the bridge vFID bitmap.
 */
static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}
3134
/* Convert a bridge vFID bitmap index back to a global vFID. */
static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}
3139
/* Return the first free bridge vFID index, or MLXSW_SP_VFID_BR_MAX
 * when the bitmap is full.
 */
static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}
3145
3146 static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
3147
/* Allocate a vFID for 802.1D bridge @br_dev, create the matching FID
 * in the device and track it on the br_vfids list. Returns ERR_PTR on
 * exhaustion, device failure or allocation failure.
 */
static struct mlxsw_sp_fid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
						    struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	/* NOTE(review): a full bitmap yields index MLXSW_SP_VFID_BR_MAX,
	 * which this check presumably maps to MLXSW_SP_VFID_MAX - confirm
	 * against the header definitions.
	 */
	vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_br_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->br_vfids.list);
	set_bit(mlxsw_sp_vfid_to_br_vfid(vfid), mlxsw_sp->br_vfids.mapped);

	return f;

err_allocate_vfid:
	/* Undo the device FID creation on allocation failure. */
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}
3186
/* Tear down a bridge FID: release its bitmap slot and list entry, delete
 * it from the device and free the software object.  Callers only invoke
 * this once no vPort references the FID anymore.
 */
static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid);

	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
	list_del(&f->list);

	/* Remove the FID from the device. */
	mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false);

	kfree(f);
}
3200
/* Make a vPort a member of the FID representing the given bridge device,
 * creating the FID on first use.  On success the vPort is mapped to the
 * FID with flooding enabled and holds a reference on it.
 */
static int mlxsw_sp_vport_br_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	/* Reuse an existing FID for this bridge, or create one. */
	f = mlxsw_sp_br_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_br_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	/* Only destroy the FID if it was created above and no other
	 * vPort took a reference yet (ref_count still zero).
	 */
	if (!f->ref_count)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}
3236
/* Undo mlxsw_sp_vport_br_vfid_join(): unmap the vPort from its bridge
 * FID, disable flooding, flush learned FDB entries and destroy the FID
 * once the last reference is dropped.  Also installed as the FID's
 * ->leave callback.
 */
static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	/* Flush FDB entries learned on this FID before releasing it. */
	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}
3253
/* Called when a VLAN upper of a port is linked to a bridge.  Moves the
 * vPort from its current vFID to the FID representing the bridge and
 * enables learning on its VID.  Returns 0 or a negative errno.
 */
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	/* Leave the current (non-bridged) vFID first; rejoined on the
	 * error path below to restore the previous state.
	 */
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_br_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		goto err_vport_br_vfid_join;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_br_vfid_leave(mlxsw_sp_vport);
err_vport_br_vfid_join:
	mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);
	return err;
}
3288
/* Called when a VLAN upper of a port is unlinked from its bridge.  Moves
 * the vPort back to a non-bridged vFID, disables learning and forces the
 * STP state to forwarding since no bridge controls the port anymore.
 */
static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_br_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);

	mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
				    MLXSW_REG_SPMS_STATE_FORWARDING);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;
}
3307
3308 static bool
3309 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
3310                                   const struct net_device *br_dev)
3311 {
3312         struct mlxsw_sp_port *mlxsw_sp_vport;
3313
3314         list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
3315                             vport.list) {
3316                 struct net_device *dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
3317
3318                 if (dev && dev == br_dev)
3319                         return false;
3320         }
3321
3322         return true;
3323 }
3324
/* Handle PRECHANGEUPPER / CHANGEUPPER notifier events for a VLAN upper
 * (vPort) of a single port, identified by @vid.  Only bridge devices are
 * accepted as uppers of a VLAN device.
 */
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	/* May be NULL if no vPort exists for this VID. */
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			/* A vPort must exist by the time linking happens. */
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			/* Nothing to do if the vPort is already gone. */
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
	}

	return err;
}
3367
3368 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
3369                                               unsigned long event, void *ptr,
3370                                               u16 vid)
3371 {
3372         struct net_device *dev;
3373         struct list_head *iter;
3374         int ret;
3375
3376         netdev_for_each_lower_dev(lag_dev, dev, iter) {
3377                 if (mlxsw_sp_port_dev_check(dev)) {
3378                         ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
3379                                                              vid);
3380                         if (ret)
3381                                 return ret;
3382                 }
3383         }
3384
3385         return 0;
3386 }
3387
3388 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
3389                                          unsigned long event, void *ptr)
3390 {
3391         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3392         u16 vid = vlan_dev_vlan_id(vlan_dev);
3393
3394         if (mlxsw_sp_port_dev_check(real_dev))
3395                 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
3396                                                       vid);
3397         else if (netif_is_lag_master(real_dev))
3398                 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
3399                                                           vid);
3400
3401         return 0;
3402 }
3403
/* Top-level netdevice notifier callback: route the event to the port,
 * LAG or VLAN handler depending on the device type, and translate the
 * errno into a notifier return code.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else
		err = 0;

	return notifier_from_errno(err);
}
3419
/* Notifier block for netdevice events (bridge/LAG/VLAN topology). */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
3423
3424 static int __init mlxsw_sp_module_init(void)
3425 {
3426         int err;
3427
3428         register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3429         err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3430         if (err)
3431                 goto err_core_driver_register;
3432         return 0;
3433
3434 err_core_driver_register:
3435         unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3436         return err;
3437 }
3438
/* Module teardown: unregister in reverse order of registration. */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
3444
/* Module entry/exit hooks and metadata. */
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);