Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
[sfrench/cifs-2.6.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/random.h>
25 #include <net/switchdev.h>
26 #include <net/pkt_cls.h>
27 #include <net/tc_act/tc_mirred.h>
28 #include <net/netevent.h>
29 #include <net/tc_act/tc_sample.h>
30 #include <net/addrconf.h>
31
32 #include "spectrum.h"
33 #include "pci.h"
34 #include "core.h"
35 #include "reg.h"
36 #include "port.h"
37 #include "trap.h"
38 #include "txheader.h"
39 #include "spectrum_cnt.h"
40 #include "spectrum_dpipe.h"
41 #include "spectrum_acl_flex_actions.h"
42 #include "spectrum_span.h"
43 #include "../mlxfw/mlxfw.h"
44
/* Map a FW minor version to its release branch, e.g. 1910 -> 19. */
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)

/* Firmware revision required by this driver for Spectrum-1 devices. */
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 1910
#define MLXSW_SP1_FWREV_SUBMINOR 622
/* Oldest minor whose FW may be reset after flashing; checked in
 * mlxsw_sp_fw_rev_validate().
 */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Expands to e.g. "mellanox/mlxsw_spectrum-13.1910.622.mfa2". */
#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp_driver_version[] = "1.0";

/* NOTE(review): presumably the bits of the base MAC that are fixed per
 * ASIC generation (low bits vary per port) — confirm against FW docs.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
74
/* Tx header field definitions. The header is prepended to every packet
 * handed to the device (see mlxsw_sp_txhdr_construct()); each
 * MLXSW_ITEM32(tx, hdr, <field>, offset, shift, width) generates the
 * corresponding getter/setter.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
142
/* Adapter tying the generic mlxfw device to a Spectrum instance; the
 * mlxfw callbacks recover the mlxsw_sp pointer via container_of on the
 * embedded mlxfw_dev member.
 */
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};
147
148 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
149                                     u16 component_index, u32 *p_max_size,
150                                     u8 *p_align_bits, u16 *p_max_write_size)
151 {
152         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
153                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
154         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
155         char mcqi_pl[MLXSW_REG_MCQI_LEN];
156         int err;
157
158         mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
159         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
160         if (err)
161                 return err;
162         mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
163                               p_max_write_size);
164
165         *p_align_bits = max_t(u8, *p_align_bits, 2);
166         *p_max_write_size = min_t(u16, *p_max_write_size,
167                                   MLXSW_REG_MCDA_MAX_DATA_LEN);
168         return 0;
169 }
170
171 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
172 {
173         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
174                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
175         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
176         char mcc_pl[MLXSW_REG_MCC_LEN];
177         u8 control_state;
178         int err;
179
180         mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
181         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
182         if (err)
183                 return err;
184
185         mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
186         if (control_state != MLXFW_FSM_STATE_IDLE)
187                 return -EBUSY;
188
189         mlxsw_reg_mcc_pack(mcc_pl,
190                            MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
191                            0, *fwhandle, 0);
192         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
193 }
194
195 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
196                                          u32 fwhandle, u16 component_index,
197                                          u32 component_size)
198 {
199         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
200                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
201         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
202         char mcc_pl[MLXSW_REG_MCC_LEN];
203
204         mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
205                            component_index, fwhandle, component_size);
206         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
207 }
208
209 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
210                                        u32 fwhandle, u8 *data, u16 size,
211                                        u32 offset)
212 {
213         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
214                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
215         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
216         char mcda_pl[MLXSW_REG_MCDA_LEN];
217
218         mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
219         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
220 }
221
222 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
223                                          u32 fwhandle, u16 component_index)
224 {
225         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
226                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
227         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
228         char mcc_pl[MLXSW_REG_MCC_LEN];
229
230         mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
231                            component_index, fwhandle, 0);
232         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
233 }
234
235 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
236 {
237         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
238                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
239         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
240         char mcc_pl[MLXSW_REG_MCC_LEN];
241
242         mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
243                            fwhandle, 0);
244         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
245 }
246
247 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
248                                     enum mlxfw_fsm_state *fsm_state,
249                                     enum mlxfw_fsm_state_err *fsm_state_err)
250 {
251         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
252                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
253         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
254         char mcc_pl[MLXSW_REG_MCC_LEN];
255         u8 control_state;
256         u8 error_code;
257         int err;
258
259         mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
260         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
261         if (err)
262                 return err;
263
264         mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
265         *fsm_state = control_state;
266         *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
267                                MLXFW_FSM_STATE_ERR_MAX);
268         return 0;
269 }
270
271 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
272 {
273         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
274                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
275         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
276         char mcc_pl[MLXSW_REG_MCC_LEN];
277
278         mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
279                            fwhandle, 0);
280         mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
281 }
282
283 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
284 {
285         struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
286                 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
287         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
288         char mcc_pl[MLXSW_REG_MCC_LEN];
289
290         mlxsw_reg_mcc_pack(mcc_pl,
291                            MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
292                            fwhandle, 0);
293         mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
294 }
295
/* Callbacks plugged into the common mlxfw firmware flashing flow. */
static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query	= mlxsw_sp_component_query,
	.fsm_lock		= mlxsw_sp_fsm_lock,
	.fsm_component_update	= mlxsw_sp_fsm_component_update,
	.fsm_block_download	= mlxsw_sp_fsm_block_download,
	.fsm_component_verify	= mlxsw_sp_fsm_component_verify,
	.fsm_activate		= mlxsw_sp_fsm_activate,
	.fsm_query_state	= mlxsw_sp_fsm_query_state,
	.fsm_cancel		= mlxsw_sp_fsm_cancel,
	.fsm_release		= mlxsw_sp_fsm_release
};
307
308 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
309                                    const struct firmware *firmware)
310 {
311         struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
312                 .mlxfw_dev = {
313                         .ops = &mlxsw_sp_mlxfw_dev_ops,
314                         .psid = mlxsw_sp->bus_info->psid,
315                         .psid_size = strlen(mlxsw_sp->bus_info->psid),
316                 },
317                 .mlxsw_sp = mlxsw_sp
318         };
319         int err;
320
321         mlxsw_core_fw_flash_start(mlxsw_sp->core);
322         err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
323         mlxsw_core_fw_flash_end(mlxsw_sp->core);
324
325         return err;
326 }
327
/* Validate that the running firmware is compatible with the driver and,
 * if not, flash the bundled firmware file. Returns 0 when the FW is
 * acceptable (or validation is skipped), -EAGAIN when a flash succeeded
 * and the caller must reset the device, or a negative error otherwise.
 */
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
	const char *fw_filename = mlxsw_sp->fw_filename;
	union devlink_param_value value;
	const struct firmware *firmware;
	int err;

	/* Don't check if driver does not require it */
	if (!req_rev || !fw_filename)
		return 0;

	/* Don't check if devlink 'fw_load_policy' param is 'flash' */
	err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
						 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
						 &value);
	if (err)
		return err;
	if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
		return 0;

	/* Validate driver & FW are compatible */
	if (rev->major != req_rev->major) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, req_rev->major);
		return -EINVAL;
	}
	/* The running FW is acceptable when it is on the same release
	 * branch (minor / 100) as the required revision and is at least
	 * as new as the required minor.subminor.
	 */
	if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
	    MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
	    (rev->minor > req_rev->minor ||
	     (rev->minor == req_rev->minor &&
	      rev->subminor >= req_rev->subminor)))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
		 fw_filename);

	/* _direct variant: do not fall back to the user-mode helper. */
	err = request_firmware_direct(&firmware, fw_filename,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			fw_filename);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");

	/* On FW flash success, tell the caller FW reset is needed
	 * if current FW supports it.
	 */
	if (rev->minor >= req_rev->can_reset_minor)
		return err ? err : -EAGAIN;
	else
		return 0;
}
389
/* Read a flow counter's packet and byte values without clearing it
 * (MGPC NOP opcode). Either output pointer may be NULL if the caller is
 * not interested in that value.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}
408
/* Zero a flow counter's packet and byte values (MGPC CLEAR opcode). */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}
418
419 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
420                                 unsigned int *p_counter_index)
421 {
422         int err;
423
424         err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
425                                      p_counter_index);
426         if (err)
427                 return err;
428         err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
429         if (err)
430                 goto err_counter_clear;
431         return 0;
432
433 err_counter_clear:
434         mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
435                               *p_counter_index);
436         return err;
437 }
438
439 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
440                                 unsigned int counter_index)
441 {
442          mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
443                                counter_index);
444 }
445
446 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
447                                      const struct mlxsw_tx_info *tx_info)
448 {
449         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
450
451         memset(txhdr, 0, MLXSW_TXHDR_LEN);
452
453         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
454         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
455         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
456         mlxsw_tx_hdr_swid_set(txhdr, 0);
457         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
458         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
459         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
460 }
461
/* Translate a bridge port STP state (BR_STATE_*) into its SPMS register
 * encoding. Listening, disabled and blocking all map to discarding.
 * Any other value is a driver bug, hence BUG().
 */
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}
477
478 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
479                               u8 state)
480 {
481         enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
482         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
483         char *spms_pl;
484         int err;
485
486         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
487         if (!spms_pl)
488                 return -ENOMEM;
489         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
490         mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
491
492         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
493         kfree(spms_pl);
494         return err;
495 }
496
/* Read the switch base MAC address from the SPAD register and cache it
 * in mlxsw_sp->base_mac (used to derive per-port addresses).
 */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
508
509 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
510                                     bool enable, u32 rate)
511 {
512         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
513         char mpsc_pl[MLXSW_REG_MPSC_LEN];
514
515         mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
516         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
517 }
518
519 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
520                                           bool is_up)
521 {
522         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
523         char paos_pl[MLXSW_REG_PAOS_LEN];
524
525         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
526                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
527                             MLXSW_PORT_ADMIN_STATUS_DOWN);
528         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
529 }
530
531 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
532                                       unsigned char *addr)
533 {
534         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
535         char ppad_pl[MLXSW_REG_PPAD_LEN];
536
537         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
538         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
539         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
540 }
541
/* Derive the port's MAC address from the switch base MAC by adding the
 * local port number to the last octet, then program it to HW.
 * NOTE(review): the addition only affects the final byte — it wraps
 * within that octet and never carries into the preceding bytes.
 * Presumably the base MAC leaves enough headroom for all local ports
 * (cf. mlxsw_sp*_mac_mask) — confirm against the FW MAC allocation.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
551
552 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
553 {
554         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
555         char pmtu_pl[MLXSW_REG_PMTU_LEN];
556         int max_mtu;
557         int err;
558
559         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
560         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
561         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
562         if (err)
563                 return err;
564         max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
565
566         if (mtu > max_mtu)
567                 return -EINVAL;
568
569         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
570         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
571 }
572
573 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
574 {
575         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
576         char pspa_pl[MLXSW_REG_PSPA_LEN];
577
578         mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
579         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
580 }
581
582 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
583 {
584         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
585         char svpe_pl[MLXSW_REG_SVPE_LEN];
586
587         mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
588         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
589 }
590
591 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
592                                    bool learn_enable)
593 {
594         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
595         char *spvmlr_pl;
596         int err;
597
598         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
599         if (!spvmlr_pl)
600                 return -ENOMEM;
601         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
602                               learn_enable);
603         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
604         kfree(spvmlr_pl);
605         return err;
606 }
607
608 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
609                                     u16 vid)
610 {
611         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
612         char spvid_pl[MLXSW_REG_SPVID_LEN];
613
614         mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
615         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
616 }
617
618 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
619                                             bool allow)
620 {
621         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
622         char spaft_pl[MLXSW_REG_SPAFT_LEN];
623
624         mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
625         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
626 }
627
628 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
629 {
630         int err;
631
632         if (!vid) {
633                 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
634                 if (err)
635                         return err;
636         } else {
637                 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
638                 if (err)
639                         return err;
640                 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
641                 if (err)
642                         goto err_port_allow_untagged_set;
643         }
644
645         mlxsw_sp_port->pvid = vid;
646         return 0;
647
648 err_port_allow_untagged_set:
649         __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
650         return err;
651 }
652
653 static int
654 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
655 {
656         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
657         char sspr_pl[MLXSW_REG_SSPR_LEN];
658
659         mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
660         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
661 }
662
/* Query the module, width and first lane a local port is mapped to
 * (PMLP register). Module and lane are taken from lane index 0.
 */
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
679
680 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
681                                     u8 module, u8 width, u8 lane)
682 {
683         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
684         char pmlp_pl[MLXSW_REG_PMLP_LEN];
685         int i;
686
687         mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
688         mlxsw_reg_pmlp_width_set(pmlp_pl, width);
689         for (i = 0; i < width; i++) {
690                 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
691                 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
692         }
693
694         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
695 }
696
697 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
698 {
699         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
700         char pmlp_pl[MLXSW_REG_PMLP_LEN];
701
702         mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
703         mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
704         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
705 }
706
707 static int mlxsw_sp_port_open(struct net_device *dev)
708 {
709         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
710         int err;
711
712         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
713         if (err)
714                 return err;
715         netif_start_queue(dev);
716         return 0;
717 }
718
719 static int mlxsw_sp_port_stop(struct net_device *dev)
720 {
721         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
722
723         netif_stop_queue(dev);
724         return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
725 }
726
/* ndo_start_xmit: prepend the mlxsw Tx header and hand the skb to the
 * core for transmission. Returns NETDEV_TX_BUSY only when the Tx path
 * is already busy; every other outcome consumes the skb.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* Ensure enough headroom for the Tx header, reallocating the skb
	 * if necessary; the original skb is released either way.
	 */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	/* Pad to the minimum Ethernet frame size; eth_skb_pad() frees the
	 * skb on failure, so only the drop is accounted here.
	 */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
783
/* ndo_set_rx_mode stub — intentionally empty; no Rx-mode state is
 * programmed to the device from here.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
787
788 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
789 {
790         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
791         struct sockaddr *addr = p;
792         int err;
793
794         if (!is_valid_ether_addr(addr->sa_data))
795                 return -EADDRNOTAVAIL;
796
797         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
798         if (err)
799                 return err;
800         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
801         return 0;
802 }
803
804 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
805                                          int mtu)
806 {
807         return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
808 }
809
810 #define MLXSW_SP_CELL_FACTOR 2  /* 2 * cell_size / (IPG + cell_size + 1) */
811
812 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
813                                   u16 delay)
814 {
815         delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
816                                                             BITS_PER_BYTE));
817         return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
818                                                                    mtu);
819 }
820
821 /* Maximum delay buffer needed in case of PAUSE frames, in bytes.
822  * Assumes 100m cable and maximum MTU.
823  */
824 #define MLXSW_SP_PAUSE_DELAY 58752
825
826 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
827                                      u16 delay, bool pfc, bool pause)
828 {
829         if (pfc)
830                 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
831         else if (pause)
832                 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
833         else
834                 return 0;
835 }
836
837 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
838                                  bool lossy)
839 {
840         if (lossy)
841                 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
842         else
843                 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
844                                                     thres);
845 }
846
/* Configure the port's headroom (PBMC) buffers for the given MTU,
 * priority->TC mapping and flow-control settings. Each PG buffer that
 * some priority maps to gets a threshold of twice the MTU plus, for
 * lossless buffers, a delay allowance; buffers no priority maps to are
 * left as read from the device.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	/* No PFC configuration means no priorities have PFC enabled. */
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	/* Start from the device's current buffer configuration. */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		u16 thres_cells;
		u16 delay_cells;
		bool lossy;

		/* Configure buffer i only if some priority maps to TC i;
		 * the PFC flag is taken from the first such priority.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		/* The buffer is lossy unless PFC or global pause applies. */
		lossy = !(pfc || pause_en);
		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
							pfc, pause_en);
		/* Buffer size = threshold + delay; Xoff at the threshold. */
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells,
				     thres_cells, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
890
891 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
892                                       int mtu, bool pause_en)
893 {
894         u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
895         bool dcb_en = !!mlxsw_sp_port->dcb.ets;
896         struct ieee_pfc *my_pfc;
897         u8 *prio_tc;
898
899         prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
900         my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
901
902         return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
903                                             pause_en, my_pfc);
904 }
905
906 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
907 {
908         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
909         bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
910         int err;
911
912         err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
913         if (err)
914                 return err;
915         err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
916         if (err)
917                 goto err_span_port_mtu_update;
918         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
919         if (err)
920                 goto err_port_mtu_set;
921         dev->mtu = mtu;
922         return 0;
923
924 err_port_mtu_set:
925         mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
926 err_span_port_mtu_update:
927         mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
928         return err;
929 }
930
931 static int
932 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
933                              struct rtnl_link_stats64 *stats)
934 {
935         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
936         struct mlxsw_sp_port_pcpu_stats *p;
937         u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
938         u32 tx_dropped = 0;
939         unsigned int start;
940         int i;
941
942         for_each_possible_cpu(i) {
943                 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
944                 do {
945                         start = u64_stats_fetch_begin_irq(&p->syncp);
946                         rx_packets      = p->rx_packets;
947                         rx_bytes        = p->rx_bytes;
948                         tx_packets      = p->tx_packets;
949                         tx_bytes        = p->tx_bytes;
950                 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
951
952                 stats->rx_packets       += rx_packets;
953                 stats->rx_bytes         += rx_bytes;
954                 stats->tx_packets       += tx_packets;
955                 stats->tx_bytes         += tx_bytes;
956                 /* tx_dropped is u32, updated without syncp protection. */
957                 tx_dropped      += p->tx_dropped;
958         }
959         stats->tx_dropped       = tx_dropped;
960         return 0;
961 }
962
963 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
964 {
965         switch (attr_id) {
966         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
967                 return true;
968         }
969
970         return false;
971 }
972
973 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
974                                            void *sp)
975 {
976         switch (attr_id) {
977         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
978                 return mlxsw_sp_port_get_sw_stats64(dev, sp);
979         }
980
981         return -EINVAL;
982 }
983
984 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
985                                        int prio, char *ppcnt_pl)
986 {
987         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
988         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
989
990         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
991         return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
992 }
993
994 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
995                                       struct rtnl_link_stats64 *stats)
996 {
997         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
998         int err;
999
1000         err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
1001                                           0, ppcnt_pl);
1002         if (err)
1003                 goto out;
1004
1005         stats->tx_packets =
1006                 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
1007         stats->rx_packets =
1008                 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
1009         stats->tx_bytes =
1010                 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
1011         stats->rx_bytes =
1012                 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
1013         stats->multicast =
1014                 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
1015
1016         stats->rx_crc_errors =
1017                 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
1018         stats->rx_frame_errors =
1019                 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
1020
1021         stats->rx_length_errors = (
1022                 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
1023                 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
1024                 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
1025
1026         stats->rx_errors = (stats->rx_crc_errors +
1027                 stats->rx_frame_errors + stats->rx_length_errors);
1028
1029 out:
1030         return err;
1031 }
1032
1033 static void
1034 mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
1035                             struct mlxsw_sp_port_xstats *xstats)
1036 {
1037         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1038         int err, i;
1039
1040         err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
1041                                           ppcnt_pl);
1042         if (!err)
1043                 xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);
1044
1045         for (i = 0; i < TC_MAX_QUEUE; i++) {
1046                 err = mlxsw_sp_port_get_stats_raw(dev,
1047                                                   MLXSW_REG_PPCNT_TC_CONG_TC,
1048                                                   i, ppcnt_pl);
1049                 if (!err)
1050                         xstats->wred_drop[i] =
1051                                 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
1052
1053                 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
1054                                                   i, ppcnt_pl);
1055                 if (err)
1056                         continue;
1057
1058                 xstats->backlog[i] =
1059                         mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
1060                 xstats->tail_drop[i] =
1061                         mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
1062         }
1063
1064         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1065                 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
1066                                                   i, ppcnt_pl);
1067                 if (err)
1068                         continue;
1069
1070                 xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
1071                 xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
1072         }
1073 }
1074
1075 static void update_stats_cache(struct work_struct *work)
1076 {
1077         struct mlxsw_sp_port *mlxsw_sp_port =
1078                 container_of(work, struct mlxsw_sp_port,
1079                              periodic_hw_stats.update_dw.work);
1080
1081         if (!netif_carrier_ok(mlxsw_sp_port->dev))
1082                 goto out;
1083
1084         mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1085                                    &mlxsw_sp_port->periodic_hw_stats.stats);
1086         mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
1087                                     &mlxsw_sp_port->periodic_hw_stats.xstats);
1088
1089 out:
1090         mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1091                                MLXSW_HW_STATS_UPDATE_TIME);
1092 }
1093
1094 /* Return the stats from a cache that is updated periodically,
1095  * as this function might get called in an atomic context.
1096  */
1097 static void
1098 mlxsw_sp_port_get_stats64(struct net_device *dev,
1099                           struct rtnl_link_stats64 *stats)
1100 {
1101         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1102
1103         memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
1104 }
1105
1106 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
1107                                     u16 vid_begin, u16 vid_end,
1108                                     bool is_member, bool untagged)
1109 {
1110         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1111         char *spvm_pl;
1112         int err;
1113
1114         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1115         if (!spvm_pl)
1116                 return -ENOMEM;
1117
1118         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1119                             vid_end, is_member, untagged);
1120         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1121         kfree(spvm_pl);
1122         return err;
1123 }
1124
1125 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1126                            u16 vid_end, bool is_member, bool untagged)
1127 {
1128         u16 vid, vid_e;
1129         int err;
1130
1131         for (vid = vid_begin; vid <= vid_end;
1132              vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
1133                 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1134                             vid_end);
1135
1136                 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1137                                                is_member, untagged);
1138                 if (err)
1139                         return err;
1140         }
1141
1142         return 0;
1143 }
1144
1145 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1146                                      bool flush_default)
1147 {
1148         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
1149
1150         list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1151                                  &mlxsw_sp_port->vlans_list, list) {
1152                 if (!flush_default &&
1153                     mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
1154                         continue;
1155                 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1156         }
1157 }
1158
1159 static void
1160 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1161 {
1162         if (mlxsw_sp_port_vlan->bridge_port)
1163                 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1164         else if (mlxsw_sp_port_vlan->fid)
1165                 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1166 }
1167
1168 struct mlxsw_sp_port_vlan *
1169 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1170 {
1171         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1172         bool untagged = vid == MLXSW_SP_DEFAULT_VID;
1173         int err;
1174
1175         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1176         if (mlxsw_sp_port_vlan)
1177                 return ERR_PTR(-EEXIST);
1178
1179         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
1180         if (err)
1181                 return ERR_PTR(err);
1182
1183         mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
1184         if (!mlxsw_sp_port_vlan) {
1185                 err = -ENOMEM;
1186                 goto err_port_vlan_alloc;
1187         }
1188
1189         mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
1190         mlxsw_sp_port_vlan->vid = vid;
1191         list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
1192
1193         return mlxsw_sp_port_vlan;
1194
1195 err_port_vlan_alloc:
1196         mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1197         return ERR_PTR(err);
1198 }
1199
/* Tear down a {Port, VID} entry: detach it from bridge/router, unlink
 * and free it, then remove the VID from the port's HW VLAN filter.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	/* Cache port and vid: the entry is freed before the final step. */
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
1210
1211 static int mlxsw_sp_port_add_vid(struct net_device *dev,
1212                                  __be16 __always_unused proto, u16 vid)
1213 {
1214         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1215
1216         /* VLAN 0 is added to HW filter when device goes up, but it is
1217          * reserved in our case, so simply return.
1218          */
1219         if (!vid)
1220                 return 0;
1221
1222         return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
1223 }
1224
1225 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1226                                   __be16 __always_unused proto, u16 vid)
1227 {
1228         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1229         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1230
1231         /* VLAN 0 is removed from HW filter when device goes down, but
1232          * it is reserved in our case, so simply return.
1233          */
1234         if (!vid)
1235                 return 0;
1236
1237         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1238         if (!mlxsw_sp_port_vlan)
1239                 return 0;
1240         mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1241
1242         return 0;
1243 }
1244
1245 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
1246                                             size_t len)
1247 {
1248         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1249
1250         return mlxsw_core_port_get_phys_port_name(mlxsw_sp_port->mlxsw_sp->core,
1251                                                   mlxsw_sp_port->local_port,
1252                                                   name, len);
1253 }
1254
1255 static struct mlxsw_sp_port_mall_tc_entry *
1256 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1257                                  unsigned long cookie) {
1258         struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1259
1260         list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1261                 if (mall_tc_entry->cookie == cookie)
1262                         return mall_tc_entry;
1263
1264         return NULL;
1265 }
1266
1267 static int
1268 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1269                                       struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
1270                                       const struct tc_action *a,
1271                                       bool ingress)
1272 {
1273         enum mlxsw_sp_span_type span_type;
1274         struct net_device *to_dev;
1275
1276         to_dev = tcf_mirred_dev(a);
1277         if (!to_dev) {
1278                 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
1279                 return -EINVAL;
1280         }
1281
1282         mirror->ingress = ingress;
1283         span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1284         return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
1285                                         true, &mirror->span_id);
1286 }
1287
1288 static void
1289 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1290                                       struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1291 {
1292         enum mlxsw_sp_span_type span_type;
1293
1294         span_type = mirror->ingress ?
1295                         MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1296         mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
1297                                  span_type, true);
1298 }
1299
/* Offload a matchall "sample" action: record the psample group and
 * sampling parameters on the port, then enable sampling in hardware.
 *
 * Returns 0 on success, -EOPNOTSUPP when the port has no sampling
 * support or the rate exceeds what MPSC can express, and -EEXIST when
 * a sampler is already bound to the port.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	/* Only one sampler per port: a non-NULL group means one exists. */
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	/* psample_group is RCU-managed (presumably read by the RX sampling
	 * path); it is published before sampling is enabled in HW below.
	 */
	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	/* Enabling HW sampling failed; un-publish the group pointer. */
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}
1334
/* Disable HW packet sampling on the port and clear the psample group
 * pointer published by mlxsw_sp_port_add_cls_matchall_sample().
 */
static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	/* Rate argument of 1 while disabling — presumably ignored by HW;
	 * see mlxsw_sp_port_sample_set().
	 */
	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}
1344
1345 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1346                                           struct tc_cls_matchall_offload *f,
1347                                           bool ingress)
1348 {
1349         struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1350         __be16 protocol = f->common.protocol;
1351         const struct tc_action *a;
1352         int err;
1353
1354         if (!tcf_exts_has_one_action(f->exts)) {
1355                 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
1356                 return -EOPNOTSUPP;
1357         }
1358
1359         mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1360         if (!mall_tc_entry)
1361                 return -ENOMEM;
1362         mall_tc_entry->cookie = f->cookie;
1363
1364         a = tcf_exts_first_action(f->exts);
1365
1366         if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
1367                 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
1368
1369                 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
1370                 mirror = &mall_tc_entry->mirror;
1371                 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
1372                                                             mirror, a, ingress);
1373         } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
1374                 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
1375                 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
1376                                                             a, ingress);
1377         } else {
1378                 err = -EOPNOTSUPP;
1379         }
1380
1381         if (err)
1382                 goto err_add_action;
1383
1384         list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
1385         return 0;
1386
1387 err_add_action:
1388         kfree(mall_tc_entry);
1389         return err;
1390 }
1391
1392 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1393                                            struct tc_cls_matchall_offload *f)
1394 {
1395         struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1396
1397         mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
1398                                                          f->cookie);
1399         if (!mall_tc_entry) {
1400                 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1401                 return;
1402         }
1403         list_del(&mall_tc_entry->list);
1404
1405         switch (mall_tc_entry->type) {
1406         case MLXSW_SP_PORT_MALL_MIRROR:
1407                 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1408                                                       &mall_tc_entry->mirror);
1409                 break;
1410         case MLXSW_SP_PORT_MALL_SAMPLE:
1411                 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
1412                 break;
1413         default:
1414                 WARN_ON(1);
1415         }
1416
1417         kfree(mall_tc_entry);
1418 }
1419
1420 static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1421                                           struct tc_cls_matchall_offload *f,
1422                                           bool ingress)
1423 {
1424         switch (f->command) {
1425         case TC_CLSMATCHALL_REPLACE:
1426                 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
1427                                                       ingress);
1428         case TC_CLSMATCHALL_DESTROY:
1429                 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
1430                 return 0;
1431         default:
1432                 return -EOPNOTSUPP;
1433         }
1434 }
1435
/* Dispatch a flower classifier offload command to the corresponding
 * mlxsw_sp_flower_* handler operating on the given ACL block.
 */
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, acl_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
1459
1460 static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
1461                                                void *type_data,
1462                                                void *cb_priv, bool ingress)
1463 {
1464         struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
1465
1466         switch (type) {
1467         case TC_SETUP_CLSMATCHALL:
1468                 if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
1469                                                    type_data))
1470                         return -EOPNOTSUPP;
1471
1472                 return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
1473                                                       ingress);
1474         case TC_SETUP_CLSFLOWER:
1475                 return 0;
1476         default:
1477                 return -EOPNOTSUPP;
1478         }
1479 }
1480
/* Matchall block callback for blocks bound to an ingress qdisc. */
static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, true);
}
1488
/* Matchall block callback for blocks bound to an egress qdisc. */
static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, false);
}
1496
1497 static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
1498                                              void *type_data, void *cb_priv)
1499 {
1500         struct mlxsw_sp_acl_block *acl_block = cb_priv;
1501
1502         switch (type) {
1503         case TC_SETUP_CLSMATCHALL:
1504                 return 0;
1505         case TC_SETUP_CLSFLOWER:
1506                 if (mlxsw_sp_acl_block_disabled(acl_block))
1507                         return -EOPNOTSUPP;
1508
1509                 return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
1510         default:
1511                 return -EOPNOTSUPP;
1512         }
1513 }
1514
/* Bind the port to the flower ACL block associated with a tcf_block.
 * The ACL block is created lazily on the first bind and shared between
 * all ports bound to the same tcf_block; sharing is tracked via the
 * block_cb refcount.
 */
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tcf_block *block, bool ingress,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb) {
		/* First binding of this tcf_block: create the ACL block
		 * and register the flower callback with it as priv.
		 */
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = __tcf_block_cb_register(block,
						   mlxsw_sp_setup_tc_block_cb_flower,
						   mlxsw_sp, acl_block, extack);
		if (IS_ERR(block_cb)) {
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
	} else {
		acl_block = tcf_block_cb_priv(block_cb);
	}
	tcf_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	return 0;

err_block_bind:
	/* Unregister and destroy only when this was the last reference.
	 * Note that err_cb_register deliberately jumps into this branch
	 * to reuse the ACL block destroy path.
	 */
	if (!tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block, block_cb);
err_cb_register:
		mlxsw_sp_acl_block_destroy(acl_block);
	}
	return err;
}
1562
/* Undo mlxsw_sp_setup_tc_block_flower_bind() for one port. The shared
 * ACL block and its registered callback are torn down only when the
 * hardware unbind succeeded and this was the last reference.
 */
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tcf_block *block, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb)
		return;

	/* Clear the port's cached pointer before unbinding from HW. */
	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = tcf_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block, block_cb);
		mlxsw_sp_acl_block_destroy(acl_block);
	}
}
1590
1591 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
1592                                    struct tc_block_offload *f)
1593 {
1594         tc_setup_cb_t *cb;
1595         bool ingress;
1596         int err;
1597
1598         if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
1599                 cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
1600                 ingress = true;
1601         } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
1602                 cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
1603                 ingress = false;
1604         } else {
1605                 return -EOPNOTSUPP;
1606         }
1607
1608         switch (f->command) {
1609         case TC_BLOCK_BIND:
1610                 err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
1611                                             mlxsw_sp_port, f->extack);
1612                 if (err)
1613                         return err;
1614                 err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
1615                                                           f->block, ingress,
1616                                                           f->extack);
1617                 if (err) {
1618                         tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
1619                         return err;
1620                 }
1621                 return 0;
1622         case TC_BLOCK_UNBIND:
1623                 mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
1624                                                       f->block, ingress);
1625                 tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
1626                 return 0;
1627         default:
1628                 return -EOPNOTSUPP;
1629         }
1630 }
1631
1632 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1633                              void *type_data)
1634 {
1635         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1636
1637         switch (type) {
1638         case TC_SETUP_BLOCK:
1639                 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
1640         case TC_SETUP_QDISC_RED:
1641                 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
1642         case TC_SETUP_QDISC_PRIO:
1643                 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
1644         default:
1645                 return -EOPNOTSUPP;
1646         }
1647 }
1648
1649
1650 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1651 {
1652         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1653
1654         if (!enable) {
1655                 if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
1656                     mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
1657                     !list_empty(&mlxsw_sp_port->mall_tc_list)) {
1658                         netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1659                         return -EINVAL;
1660                 }
1661                 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
1662                 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
1663         } else {
1664                 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
1665                 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
1666         }
1667         return 0;
1668 }
1669
1670 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1671
1672 static int mlxsw_sp_handle_feature(struct net_device *dev,
1673                                    netdev_features_t wanted_features,
1674                                    netdev_features_t feature,
1675                                    mlxsw_sp_feature_handler feature_handler)
1676 {
1677         netdev_features_t changes = wanted_features ^ dev->features;
1678         bool enable = !!(wanted_features & feature);
1679         int err;
1680
1681         if (!(changes & feature))
1682                 return 0;
1683
1684         err = feature_handler(dev, enable);
1685         if (err) {
1686                 netdev_err(dev, "%s feature %pNF failed, err %d\n",
1687                            enable ? "Enable" : "Disable", &feature, err);
1688                 return err;
1689         }
1690
1691         if (enable)
1692                 dev->features |= feature;
1693         else
1694                 dev->features &= ~feature;
1695
1696         return 0;
1697 }
1698 static int mlxsw_sp_set_features(struct net_device *dev,
1699                                  netdev_features_t features)
1700 {
1701         return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1702                                        mlxsw_sp_feature_hw_tc);
1703 }
1704
/* Netdev operations for Spectrum front-panel ports. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc           = mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
	.ndo_set_features	= mlxsw_sp_set_features,
};
1721
1722 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1723                                       struct ethtool_drvinfo *drvinfo)
1724 {
1725         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1726         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1727
1728         strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
1729                 sizeof(drvinfo->driver));
1730         strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1731                 sizeof(drvinfo->version));
1732         snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1733                  "%d.%d.%d",
1734                  mlxsw_sp->bus_info->fw_rev.major,
1735                  mlxsw_sp->bus_info->fw_rev.minor,
1736                  mlxsw_sp->bus_info->fw_rev.subminor);
1737         strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1738                 sizeof(drvinfo->bus_info));
1739 }
1740
1741 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1742                                          struct ethtool_pauseparam *pause)
1743 {
1744         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1745
1746         pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1747         pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1748 }
1749
1750 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1751                                    struct ethtool_pauseparam *pause)
1752 {
1753         char pfcc_pl[MLXSW_REG_PFCC_LEN];
1754
1755         mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1756         mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1757         mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1758
1759         return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1760                                pfcc_pl);
1761 }
1762
1763 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1764                                         struct ethtool_pauseparam *pause)
1765 {
1766         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1767         bool pause_en = pause->tx_pause || pause->rx_pause;
1768         int err;
1769
1770         if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1771                 netdev_err(dev, "PFC already enabled on port\n");
1772                 return -EINVAL;
1773         }
1774
1775         if (pause->autoneg) {
1776                 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1777                 return -EINVAL;
1778         }
1779
1780         err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1781         if (err) {
1782                 netdev_err(dev, "Failed to configure port's headroom\n");
1783                 return err;
1784         }
1785
1786         err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1787         if (err) {
1788                 netdev_err(dev, "Failed to set PAUSE parameters\n");
1789                 goto err_port_pause_configure;
1790         }
1791
1792         mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1793         mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1794
1795         return 0;
1796
1797 err_port_pause_configure:
1798         pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1799         mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1800         return err;
1801 }
1802
/* One ethtool statistic: its name, the PPCNT payload accessor that
 * extracts it, and whether the raw value is in cells and must be
 * converted to bytes (see __mlxsw_sp_port_get_stats()).
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};
1808
/* IEEE 802.3 counter group (MLXSW_REG_PPCNT_IEEE_8023_CNT). */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1889
/* RFC 2863 interface counter group (MLXSW_REG_PPCNT_RFC_2863_CNT). */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
	{
		.str = "if_in_discards",
		.getter = mlxsw_reg_ppcnt_if_in_discards_get,
	},
	{
		.str = "if_out_discards",
		.getter = mlxsw_reg_ppcnt_if_out_discards_get,
	},
	{
		.str = "if_out_errors",
		.getter = mlxsw_reg_ppcnt_if_out_errors_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)
1907
/* RFC 2819 (RMON) counter group (MLXSW_REG_PPCNT_RFC_2819_CNT). */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
	{
		.str = "ether_stats_undersize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
	},
	{
		.str = "ether_stats_oversize_pkts",
		.getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
	},
	{
		.str = "ether_stats_fragments",
		.getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
	},
	{
		.str = "ether_pkts64octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
	},
	{
		.str = "ether_pkts65to127octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
	},
	{
		.str = "ether_pkts128to255octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
	},
	{
		.str = "ether_pkts256to511octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
	},
	{
		.str = "ether_pkts512to1023octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
	},
	{
		.str = "ether_pkts1024to1518octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
	},
	{
		.str = "ether_pkts1519to2047octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
	},
	{
		.str = "ether_pkts2048to4095octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
	},
	{
		.str = "ether_pkts4096to8191octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
	},
	{
		.str = "ether_pkts8192to10239octets",
		.getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
1965
/* RFC 3635 (Ethernet-like MIB) counter group (MLXSW_REG_PPCNT_RFC_3635_CNT). */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
	{
		.str = "dot3stats_fcs_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
	},
	{
		.str = "dot3stats_symbol_errors",
		.getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
	},
	{
		.str = "dot3control_in_unknown_opcodes",
		.getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
	},
	{
		.str = "dot3in_pause_frames",
		.getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
	},
};

#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)
1987
/* Discard counter group (MLXSW_REG_PPCNT_DISCARD_CNT). */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
	{
		.str = "discard_ingress_general",
		.getter = mlxsw_reg_ppcnt_ingress_general_get,
	},
	{
		.str = "discard_ingress_policy_engine",
		.getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
	},
	{
		.str = "discard_ingress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
	},
	{
		.str = "discard_ingress_tag_frame_type",
		.getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
	},
	{
		.str = "discard_egress_vlan_membership",
		.getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
	},
	{
		.str = "discard_loopback_filter",
		.getter = mlxsw_reg_ppcnt_loopback_filter_get,
	},
	{
		.str = "discard_egress_general",
		.getter = mlxsw_reg_ppcnt_egress_general_get,
	},
	{
		.str = "discard_egress_hoq",
		.getter = mlxsw_reg_ppcnt_egress_hoq_get,
	},
	{
		.str = "discard_egress_policy_engine",
		.getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
	},
	{
		.str = "discard_ingress_tx_link_down",
		.getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
	},
	{
		.str = "discard_egress_stp_filter",
		.getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
	},
	{
		.str = "discard_egress_sll",
		.getter = mlxsw_reg_ppcnt_egress_sll_get,
	},
};

#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
	ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)
2041
/* Per-priority counter group (MLXSW_REG_PPCNT_PRIO_CNT); names get the
 * priority number appended by mlxsw_sp_port_get_prio_strings().
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
2078
/* Per-TC counter group (MLXSW_REG_PPCNT_TC_CNT); names get the queue
 * number appended by mlxsw_sp_port_get_tc_strings(). The transmit-queue
 * depth is reported by hardware in cells and converted to bytes.
 */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
2092
/* Total ethtool stats count: all flat groups once, plus the per-priority
 * group replicated for each of the 8 priorities and the per-TC group
 * replicated for each TC queue.
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
					 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
					 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
					  IEEE_8021QAZ_MAX_TCS) + \
					 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
					  TC_MAX_QUEUE))
2102
2103 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
2104 {
2105         int i;
2106
2107         for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
2108                 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2109                          mlxsw_sp_port_hw_prio_stats[i].str, prio);
2110                 *p += ETH_GSTRING_LEN;
2111         }
2112 }
2113
2114 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
2115 {
2116         int i;
2117
2118         for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
2119                 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2120                          mlxsw_sp_port_hw_tc_stats[i].str, tc);
2121                 *p += ETH_GSTRING_LEN;
2122         }
2123 }
2124
2125 static void mlxsw_sp_port_get_strings(struct net_device *dev,
2126                                       u32 stringset, u8 *data)
2127 {
2128         u8 *p = data;
2129         int i;
2130
2131         switch (stringset) {
2132         case ETH_SS_STATS:
2133                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
2134                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
2135                                ETH_GSTRING_LEN);
2136                         p += ETH_GSTRING_LEN;
2137                 }
2138
2139                 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
2140                         memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
2141                                ETH_GSTRING_LEN);
2142                         p += ETH_GSTRING_LEN;
2143                 }
2144
2145                 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
2146                         memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
2147                                ETH_GSTRING_LEN);
2148                         p += ETH_GSTRING_LEN;
2149                 }
2150
2151                 for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
2152                         memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
2153                                ETH_GSTRING_LEN);
2154                         p += ETH_GSTRING_LEN;
2155                 }
2156
2157                 for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
2158                         memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
2159                                ETH_GSTRING_LEN);
2160                         p += ETH_GSTRING_LEN;
2161                 }
2162
2163                 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2164                         mlxsw_sp_port_get_prio_strings(&p, i);
2165
2166                 for (i = 0; i < TC_MAX_QUEUE; i++)
2167                         mlxsw_sp_port_get_tc_strings(&p, i);
2168
2169                 break;
2170         }
2171 }
2172
2173 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
2174                                      enum ethtool_phys_id_state state)
2175 {
2176         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2177         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2178         char mlcr_pl[MLXSW_REG_MLCR_LEN];
2179         bool active;
2180
2181         switch (state) {
2182         case ETHTOOL_ID_ACTIVE:
2183                 active = true;
2184                 break;
2185         case ETHTOOL_ID_INACTIVE:
2186                 active = false;
2187                 break;
2188         default:
2189                 return -EOPNOTSUPP;
2190         }
2191
2192         mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
2193         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
2194 }
2195
2196 static int
2197 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
2198                                int *p_len, enum mlxsw_reg_ppcnt_grp grp)
2199 {
2200         switch (grp) {
2201         case MLXSW_REG_PPCNT_IEEE_8023_CNT:
2202                 *p_hw_stats = mlxsw_sp_port_hw_stats;
2203                 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
2204                 break;
2205         case MLXSW_REG_PPCNT_RFC_2863_CNT:
2206                 *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
2207                 *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
2208                 break;
2209         case MLXSW_REG_PPCNT_RFC_2819_CNT:
2210                 *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
2211                 *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
2212                 break;
2213         case MLXSW_REG_PPCNT_RFC_3635_CNT:
2214                 *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
2215                 *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
2216                 break;
2217         case MLXSW_REG_PPCNT_DISCARD_CNT:
2218                 *p_hw_stats = mlxsw_sp_port_hw_discard_stats;
2219                 *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
2220                 break;
2221         case MLXSW_REG_PPCNT_PRIO_CNT:
2222                 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
2223                 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2224                 break;
2225         case MLXSW_REG_PPCNT_TC_CNT:
2226                 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
2227                 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
2228                 break;
2229         default:
2230                 WARN_ON(1);
2231                 return -EOPNOTSUPP;
2232         }
2233         return 0;
2234 }
2235
2236 static void __mlxsw_sp_port_get_stats(struct net_device *dev,
2237                                       enum mlxsw_reg_ppcnt_grp grp, int prio,
2238                                       u64 *data, int data_index)
2239 {
2240         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2241         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2242         struct mlxsw_sp_port_hw_stats *hw_stats;
2243         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
2244         int i, len;
2245         int err;
2246
2247         err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
2248         if (err)
2249                 return;
2250         mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
2251         for (i = 0; i < len; i++) {
2252                 data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
2253                 if (!hw_stats[i].cells_bytes)
2254                         continue;
2255                 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
2256                                                             data[data_index + i]);
2257         }
2258 }
2259
2260 static void mlxsw_sp_port_get_stats(struct net_device *dev,
2261                                     struct ethtool_stats *stats, u64 *data)
2262 {
2263         int i, data_index = 0;
2264
2265         /* IEEE 802.3 Counters */
2266         __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
2267                                   data, data_index);
2268         data_index = MLXSW_SP_PORT_HW_STATS_LEN;
2269
2270         /* RFC 2863 Counters */
2271         __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
2272                                   data, data_index);
2273         data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
2274
2275         /* RFC 2819 Counters */
2276         __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
2277                                   data, data_index);
2278         data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
2279
2280         /* RFC 3635 Counters */
2281         __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
2282                                   data, data_index);
2283         data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
2284
2285         /* Discard Counters */
2286         __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
2287                                   data, data_index);
2288         data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
2289
2290         /* Per-Priority Counters */
2291         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2292                 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
2293                                           data, data_index);
2294                 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2295         }
2296
2297         /* Per-TC Counters */
2298         for (i = 0; i < TC_MAX_QUEUE; i++) {
2299                 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
2300                                           data, data_index);
2301                 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
2302         }
2303 }
2304
2305 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2306 {
2307         switch (sset) {
2308         case ETH_SS_STATS:
2309                 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
2310         default:
2311                 return -EOPNOTSUPP;
2312         }
2313 }
2314
/* Maps a PTYS register speed-capability bitmask to the corresponding
 * ethtool link-mode bit and link speed value.
 */
struct mlxsw_sp_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};
2320
2321 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
2322         {
2323                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
2324                 .mask_ethtool   = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2325                 .speed          = SPEED_100,
2326         },
2327         {
2328                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2329                                   MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
2330                 .mask_ethtool   = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2331                 .speed          = SPEED_1000,
2332         },
2333         {
2334                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
2335                 .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2336                 .speed          = SPEED_10000,
2337         },
2338         {
2339                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2340                                   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
2341                 .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2342                 .speed          = SPEED_10000,
2343         },
2344         {
2345                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2346                                   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2347                                   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2348                                   MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
2349                 .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2350                 .speed          = SPEED_10000,
2351         },
2352         {
2353                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
2354                 .mask_ethtool   = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2355                 .speed          = SPEED_20000,
2356         },
2357         {
2358                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
2359                 .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2360                 .speed          = SPEED_40000,
2361         },
2362         {
2363                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
2364                 .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2365                 .speed          = SPEED_40000,
2366         },
2367         {
2368                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
2369                 .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2370                 .speed          = SPEED_40000,
2371         },
2372         {
2373                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
2374                 .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2375                 .speed          = SPEED_40000,
2376         },
2377         {
2378                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2379                 .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2380                 .speed          = SPEED_25000,
2381         },
2382         {
2383                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2384                 .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2385                 .speed          = SPEED_25000,
2386         },
2387         {
2388                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2389                 .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2390                 .speed          = SPEED_25000,
2391         },
2392         {
2393                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2394                 .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2395                 .speed          = SPEED_25000,
2396         },
2397         {
2398                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2399                 .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2400                 .speed          = SPEED_50000,
2401         },
2402         {
2403                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2404                 .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2405                 .speed          = SPEED_50000,
2406         },
2407         {
2408                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2409                 .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2410                 .speed          = SPEED_50000,
2411         },
2412         {
2413                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2414                 .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2415                 .speed          = SPEED_56000,
2416         },
2417         {
2418                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2419                 .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2420                 .speed          = SPEED_56000,
2421         },
2422         {
2423                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2424                 .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2425                 .speed          = SPEED_56000,
2426         },
2427         {
2428                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2429                 .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2430                 .speed          = SPEED_56000,
2431         },
2432         {
2433                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2434                 .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2435                 .speed          = SPEED_100000,
2436         },
2437         {
2438                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2439                 .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2440                 .speed          = SPEED_100000,
2441         },
2442         {
2443                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2444                 .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2445                 .speed          = SPEED_100000,
2446         },
2447         {
2448                 .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2449                 .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2450                 .speed          = SPEED_100000,
2451         },
2452 };
2453
2454 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2455
2456 static void
2457 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2458                                   struct ethtool_link_ksettings *cmd)
2459 {
2460         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2461                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2462                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2463                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2464                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2465                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2466                 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
2467
2468         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2469                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2470                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2471                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2472                               MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
2473                 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
2474 }
2475
2476 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
2477 {
2478         int i;
2479
2480         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2481                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
2482                         __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2483                                   mode);
2484         }
2485 }
2486
2487 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
2488                                             struct ethtool_link_ksettings *cmd)
2489 {
2490         u32 speed = SPEED_UNKNOWN;
2491         u8 duplex = DUPLEX_UNKNOWN;
2492         int i;
2493
2494         if (!carrier_ok)
2495                 goto out;
2496
2497         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2498                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2499                         speed = mlxsw_sp_port_link_mode[i].speed;
2500                         duplex = DUPLEX_FULL;
2501                         break;
2502                 }
2503         }
2504 out:
2505         cmd->base.speed = speed;
2506         cmd->base.duplex = duplex;
2507 }
2508
2509 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2510 {
2511         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2512                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2513                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2514                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2515                 return PORT_FIBRE;
2516
2517         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2518                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2519                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2520                 return PORT_DA;
2521
2522         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2523                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2524                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2525                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2526                 return PORT_NONE;
2527
2528         return PORT_OTHER;
2529 }
2530
2531 static u32
2532 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
2533 {
2534         u32 ptys_proto = 0;
2535         int i;
2536
2537         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2538                 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2539                              cmd->link_modes.advertising))
2540                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2541         }
2542         return ptys_proto;
2543 }
2544
2545 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2546 {
2547         u32 ptys_proto = 0;
2548         int i;
2549
2550         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2551                 if (speed == mlxsw_sp_port_link_mode[i].speed)
2552                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2553         }
2554         return ptys_proto;
2555 }
2556
2557 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2558 {
2559         u32 ptys_proto = 0;
2560         int i;
2561
2562         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2563                 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2564                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2565         }
2566         return ptys_proto;
2567 }
2568
2569 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2570                                              struct ethtool_link_ksettings *cmd)
2571 {
2572         ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2573         ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2574         ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2575
2576         mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2577         mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2578 }
2579
2580 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2581                                              struct ethtool_link_ksettings *cmd)
2582 {
2583         if (!autoneg)
2584                 return;
2585
2586         ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2587         mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2588 }
2589
2590 static void
2591 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2592                                     struct ethtool_link_ksettings *cmd)
2593 {
2594         if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2595                 return;
2596
2597         ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2598         mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2599 }
2600
/* ethtool get_link_ksettings handler: query the PTYS register once and
 * derive the supported/advertised/link-partner modes, connector type
 * and operational speed/duplex from its fields.
 */
static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u8 autoneg_status;
	bool autoneg;
	int err;

	autoneg = mlxsw_sp_port->link.autoneg;
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	/* cap = hardware capability, admin = configured, oper = current */
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  &eth_proto_oper);

	mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);

	mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);

	eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
	autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
	mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);

	cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
	/* Speed/duplex are only valid while the carrier is up. */
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
					cmd);

	return 0;
}
2635
2636 static int
2637 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2638                                  const struct ethtool_link_ksettings *cmd)
2639 {
2640         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2641         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2642         char ptys_pl[MLXSW_REG_PTYS_LEN];
2643         u32 eth_proto_cap, eth_proto_new;
2644         bool autoneg;
2645         int err;
2646
2647         mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false);
2648         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2649         if (err)
2650                 return err;
2651         mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2652
2653         autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2654         eth_proto_new = autoneg ?
2655                 mlxsw_sp_to_ptys_advert_link(cmd) :
2656                 mlxsw_sp_to_ptys_speed(cmd->base.speed);
2657
2658         eth_proto_new = eth_proto_new & eth_proto_cap;
2659         if (!eth_proto_new) {
2660                 netdev_err(dev, "No supported speed requested\n");
2661                 return -EINVAL;
2662         }
2663
2664         mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2665                                 eth_proto_new, autoneg);
2666         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2667         if (err)
2668                 return err;
2669
2670         if (!netif_running(dev))
2671                 return 0;
2672
2673         mlxsw_sp_port->link.autoneg = autoneg;
2674
2675         mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2676         mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
2677
2678         return 0;
2679 }
2680
/* ethtool flash_device handler: load the named firmware file and burn
 * it to the device.
 */
static int mlxsw_sp_flash_device(struct net_device *dev,
				 struct ethtool_flash *flash)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct firmware *firmware;
	int err;

	/* Only whole-image flashing is supported. */
	if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
		return -EOPNOTSUPP;

	/* Flashing is slow, so release RTNL for its duration; pin the
	 * netdev first so it cannot go away while the lock is dropped.
	 */
	dev_hold(dev);
	rtnl_unlock();

	err = request_firmware_direct(&firmware, flash->data, &dev->dev);
	if (err)
		goto out;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
out:
	/* Re-take RTNL before returning to the ethtool core. */
	rtnl_lock();
	dev_put(dev);
	return err;
}
2705
/* Module EEPROM is reached via two I2C addresses: the low address for
 * the first 256-byte page and the high address for what lies above it.
 */
#define MLXSW_SP_I2C_ADDR_LOW 0x50
#define MLXSW_SP_I2C_ADDR_HIGH 0x51
#define MLXSW_SP_EEPROM_PAGE_LENGTH 256

/* Read up to MLXSW_SP_REG_MCIA_EEPROM_SIZE bytes of the port module's
 * EEPROM starting at @offset into @data using the MCIA register. The
 * number of bytes actually copied is returned via @p_read_size; a read
 * is clamped so it never crosses the 256-byte page boundary.
 */
static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 offset, u16 size, void *data,
					unsigned int *p_read_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
	char mcia_pl[MLXSW_REG_MCIA_LEN];
	u16 i2c_addr;
	int status;
	int err;

	/* One MCIA transaction carries at most this many bytes. */
	size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);

	if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH &&
	    offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH)
		/* Cross pages read, read until offset 256 in low page */
		size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset;

	i2c_addr = MLXSW_SP_I2C_ADDR_LOW;
	if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) {
		/* Upper page: switch I2C address and rebase the offset. */
		i2c_addr = MLXSW_SP_I2C_ADDR_HIGH;
		offset -= MLXSW_SP_EEPROM_PAGE_LENGTH;
	}

	mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
			    0, 0, offset, size, i2c_addr);

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
	if (err)
		return err;

	/* A non-zero MCIA status means the module access itself failed. */
	status = mlxsw_reg_mcia_status_get(mcia_pl);
	if (status)
		return -EIO;

	mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
	memcpy(data, eeprom_tmp, size);
	*p_read_size = size;

	return 0;
}
2751
/* Module revision compliance values read from the module EEPROM. */
enum mlxsw_sp_eeprom_module_info_rev_id {
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC      = 0x00,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436       = 0x01,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636       = 0x03,
};

/* Module identifier values read from the module EEPROM. */
enum mlxsw_sp_eeprom_module_info_id {
	MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP              = 0x03,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP             = 0x0C,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS        = 0x0D,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28           = 0x11,
};

/* Offsets of the identifier (0) and revision (1) bytes within the
 * EEPROM; _SIZE doubles as the number of bytes to read for both.
 */
enum mlxsw_sp_eeprom_module_info {
	MLXSW_SP_EEPROM_MODULE_INFO_ID,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
	MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
};
2770
2771 static int mlxsw_sp_get_module_info(struct net_device *netdev,
2772                                     struct ethtool_modinfo *modinfo)
2773 {
2774         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2775         u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
2776         u8 module_rev_id, module_id;
2777         unsigned int read_size;
2778         int err;
2779
2780         err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
2781                                            MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
2782                                            module_info, &read_size);
2783         if (err)
2784                 return err;
2785
2786         if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
2787                 return -EIO;
2788
2789         module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID];
2790         module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID];
2791
2792         switch (module_id) {
2793         case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP:
2794                 modinfo->type       = ETH_MODULE_SFF_8436;
2795                 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2796                 break;
2797         case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
2798         case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28:
2799                 if (module_id  == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 ||
2800                     module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) {
2801                         modinfo->type       = ETH_MODULE_SFF_8636;
2802                         modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2803                 } else {
2804                         modinfo->type       = ETH_MODULE_SFF_8436;
2805                         modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2806                 }
2807                 break;
2808         case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP:
2809                 modinfo->type       = ETH_MODULE_SFF_8472;
2810                 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2811                 break;
2812         default:
2813                 return -EINVAL;
2814         }
2815
2816         return 0;
2817 }
2818
2819 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
2820                                       struct ethtool_eeprom *ee,
2821                                       u8 *data)
2822 {
2823         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2824         int offset = ee->offset;
2825         unsigned int read_size;
2826         int i = 0;
2827         int err;
2828
2829         if (!ee->len)
2830                 return -EINVAL;
2831
2832         memset(data, 0, ee->len);
2833
2834         while (i < ee->len) {
2835                 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset,
2836                                                    ee->len - i, data + i,
2837                                                    &read_size);
2838                 if (err) {
2839                         netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n");
2840                         return err;
2841                 }
2842
2843                 i += read_size;
2844                 offset += read_size;
2845         }
2846
2847         return 0;
2848 }
2849
/* ethtool operations exposed by Spectrum port netdevs. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
	.flash_device		= mlxsw_sp_flash_device,
	.get_module_info	= mlxsw_sp_get_module_info,
	.get_module_eeprom	= mlxsw_sp_get_module_eeprom,
};
2865
/* Restrict the port's administratively enabled speeds to those
 * attainable with @width lanes (base per-lane speed times lane count).
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	/* Enable every mode at or below the attainable speed. */
	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_admin, mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
2879
/* Configure one element of the port's ETS scheduling hierarchy: link
 * element @index at level @hr to @next_index one level up, optionally
 * putting it in DWRR mode with the given weight.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* "de" makes the DWRR fields below take effect. */
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
2894
/* Set the maximum shaper rate of one ETS hierarchy element identified
 * by level @hr, @index and parent @next_index.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* "mase" makes the max shaper rate field below take effect. */
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
2908
/* Set the minimum shaper rate (bandwidth guarantee) of one ETS
 * hierarchy element identified by level @hr, @index and @next_index.
 */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* "mise" makes the min shaper rate field below take effect. */
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
2923
/* Map switch priority @switch_prio to traffic class @tclass on the
 * port via the QTCT register.
 */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
2934
/* Bring the port's ETS scheduling hierarchy to a known default state:
 * build the group/subgroup/TC element tree, disable all max shapers,
 * give the multicast TCs (indices 8-15) a minimum bandwidth guarantee,
 * and map every switch priority to traffic class 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the element hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		/* Unicast TC i hangs off subgroup i... */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* ...and so does the corresponding multicast TC (i + 8). */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC,
					    i + 8, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HIERARCY_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
3020
/* Enable/disable the port's multicast traffic-class mode via the QTCTM
 * register (NOTE(review): presumably the mapping of multicast packets
 * to the dedicated MC TCs - confirm against the QTCTM register spec).
 */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}
3030
3031 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
3032                                 bool split, u8 module, u8 width, u8 lane)
3033 {
3034         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
3035         struct mlxsw_sp_port *mlxsw_sp_port;
3036         struct net_device *dev;
3037         int err;
3038
3039         err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
3040         if (err) {
3041                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
3042                         local_port);
3043                 return err;
3044         }
3045
3046         dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
3047         if (!dev) {
3048                 err = -ENOMEM;
3049                 goto err_alloc_etherdev;
3050         }
3051         SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
3052         mlxsw_sp_port = netdev_priv(dev);
3053         mlxsw_sp_port->dev = dev;
3054         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
3055         mlxsw_sp_port->local_port = local_port;
3056         mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
3057         mlxsw_sp_port->split = split;
3058         mlxsw_sp_port->mapping.module = module;
3059         mlxsw_sp_port->mapping.width = width;
3060         mlxsw_sp_port->mapping.lane = lane;
3061         mlxsw_sp_port->link.autoneg = 1;
3062         INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
3063         INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
3064
3065         mlxsw_sp_port->pcpu_stats =
3066                 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
3067         if (!mlxsw_sp_port->pcpu_stats) {
3068                 err = -ENOMEM;
3069                 goto err_alloc_stats;
3070         }
3071
3072         mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
3073                                         GFP_KERNEL);
3074         if (!mlxsw_sp_port->sample) {
3075                 err = -ENOMEM;
3076                 goto err_alloc_sample;
3077         }
3078
3079         INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
3080                           &update_stats_cache);
3081
3082         dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
3083         dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
3084
3085         err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
3086         if (err) {
3087                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
3088                         mlxsw_sp_port->local_port);
3089                 goto err_port_module_map;
3090         }
3091
3092         err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
3093         if (err) {
3094                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
3095                         mlxsw_sp_port->local_port);
3096                 goto err_port_swid_set;
3097         }
3098
3099         err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
3100         if (err) {
3101                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
3102                         mlxsw_sp_port->local_port);
3103                 goto err_dev_addr_init;
3104         }
3105
3106         netif_carrier_off(dev);
3107
3108         dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
3109                          NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
3110         dev->hw_features |= NETIF_F_HW_TC;
3111
3112         dev->min_mtu = 0;
3113         dev->max_mtu = ETH_MAX_MTU;
3114
3115         /* Each packet needs to have a Tx header (metadata) on top all other
3116          * headers.
3117          */
3118         dev->needed_headroom = MLXSW_TXHDR_LEN;
3119
3120         err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
3121         if (err) {
3122                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
3123                         mlxsw_sp_port->local_port);
3124                 goto err_port_system_port_mapping_set;
3125         }
3126
3127         err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
3128         if (err) {
3129                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
3130                         mlxsw_sp_port->local_port);
3131                 goto err_port_speed_by_width_set;
3132         }
3133
3134         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
3135         if (err) {
3136                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
3137                         mlxsw_sp_port->local_port);
3138                 goto err_port_mtu_set;
3139         }
3140
3141         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
3142         if (err)
3143                 goto err_port_admin_status_set;
3144
3145         err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
3146         if (err) {
3147                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
3148                         mlxsw_sp_port->local_port);
3149                 goto err_port_buffers_init;
3150         }
3151
3152         err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
3153         if (err) {
3154                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
3155                         mlxsw_sp_port->local_port);
3156                 goto err_port_ets_init;
3157         }
3158
3159         err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
3160         if (err) {
3161                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
3162                         mlxsw_sp_port->local_port);
3163                 goto err_port_tc_mc_mode;
3164         }
3165
3166         /* ETS and buffers must be initialized before DCB. */
3167         err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
3168         if (err) {
3169                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
3170                         mlxsw_sp_port->local_port);
3171                 goto err_port_dcb_init;
3172         }
3173
3174         err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
3175         if (err) {
3176                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
3177                         mlxsw_sp_port->local_port);
3178                 goto err_port_fids_init;
3179         }
3180
3181         err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
3182         if (err) {
3183                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
3184                         mlxsw_sp_port->local_port);
3185                 goto err_port_qdiscs_init;
3186         }
3187
3188         err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
3189         if (err) {
3190                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
3191                         mlxsw_sp_port->local_port);
3192                 goto err_port_nve_init;
3193         }
3194
3195         err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
3196         if (err) {
3197                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
3198                         mlxsw_sp_port->local_port);
3199                 goto err_port_pvid_set;
3200         }
3201
3202         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
3203                                                        MLXSW_SP_DEFAULT_VID);
3204         if (IS_ERR(mlxsw_sp_port_vlan)) {
3205                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
3206                         mlxsw_sp_port->local_port);
3207                 err = PTR_ERR(mlxsw_sp_port_vlan);
3208                 goto err_port_vlan_create;
3209         }
3210         mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
3211
3212         mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
3213         mlxsw_sp->ports[local_port] = mlxsw_sp_port;
3214         err = register_netdev(dev);
3215         if (err) {
3216                 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
3217                         mlxsw_sp_port->local_port);
3218                 goto err_register_netdev;
3219         }
3220
3221         mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
3222                                 mlxsw_sp_port, dev, module + 1,
3223                                 mlxsw_sp_port->split, lane / width);
3224         mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
3225         return 0;
3226
3227 err_register_netdev:
3228         mlxsw_sp->ports[local_port] = NULL;
3229         mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
3230         mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
3231 err_port_vlan_create:
3232 err_port_pvid_set:
3233         mlxsw_sp_port_nve_fini(mlxsw_sp_port);
3234 err_port_nve_init:
3235         mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
3236 err_port_qdiscs_init:
3237         mlxsw_sp_port_fids_fini(mlxsw_sp_port);
3238 err_port_fids_init:
3239         mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
3240 err_port_dcb_init:
3241         mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
3242 err_port_tc_mc_mode:
3243 err_port_ets_init:
3244 err_port_buffers_init:
3245 err_port_admin_status_set:
3246 err_port_mtu_set:
3247 err_port_speed_by_width_set:
3248 err_port_system_port_mapping_set:
3249 err_dev_addr_init:
3250         mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
3251 err_port_swid_set:
3252         mlxsw_sp_port_module_unmap(mlxsw_sp_port);
3253 err_port_module_map:
3254         kfree(mlxsw_sp_port->sample);
3255 err_alloc_sample:
3256         free_percpu(mlxsw_sp_port->pcpu_stats);
3257 err_alloc_stats:
3258         free_netdev(dev);
3259 err_alloc_etherdev:
3260         mlxsw_core_port_fini(mlxsw_sp->core, local_port);
3261         return err;
3262 }
3263
/* Destroy the port previously created by mlxsw_sp_port_create(). The
 * teardown sequence is the exact reverse of creation and mirrors the
 * error-unwind path of the create function.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

        /* Stop periodic stats collection before the netdev goes away. */
        cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
        mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
        unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
        mlxsw_sp->ports[local_port] = NULL;
        mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
        mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
        mlxsw_sp_port_nve_fini(mlxsw_sp_port);
        mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
        mlxsw_sp_port_fids_fini(mlxsw_sp_port);
        mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
        mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
        mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
        mlxsw_sp_port_module_unmap(mlxsw_sp_port);
        kfree(mlxsw_sp_port->sample);
        free_percpu(mlxsw_sp_port->pcpu_stats);
        /* The VLAN flush above should have emptied the list by now. */
        WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
        free_netdev(mlxsw_sp_port->dev);
        mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}
3287
3288 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3289 {
3290         return mlxsw_sp->ports[local_port] != NULL;
3291 }
3292
3293 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
3294 {
3295         int i;
3296
3297         for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
3298                 if (mlxsw_sp_port_created(mlxsw_sp, i))
3299                         mlxsw_sp_port_remove(mlxsw_sp, i);
3300         kfree(mlxsw_sp->port_to_module);
3301         kfree(mlxsw_sp->ports);
3302 }
3303
/* Allocate the port array and port-to-module table, then walk all
 * possible local ports, query each port's module mapping from the
 * device and create a netdev for every port that has a module mapped
 * (non-zero width). The port-to-module table is later used when
 * re-creating unsplit ports.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
        unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
        u8 module, width, lane;
        size_t alloc_size;
        int i;
        int err;

        alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
        mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
        if (!mlxsw_sp->ports)
                return -ENOMEM;

        /* Entries are ints (not u8) so -1 can mark an unmapped port. */
        mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
                                                 GFP_KERNEL);
        if (!mlxsw_sp->port_to_module) {
                err = -ENOMEM;
                goto err_port_to_module_alloc;
        }

        /* Local port 0 is the CPU port; usable ports start at 1. */
        for (i = 1; i < max_ports; i++) {
                /* Mark as invalid */
                mlxsw_sp->port_to_module[i] = -1;

                err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
                                                    &width, &lane);
                if (err)
                        goto err_port_module_info_get;
                /* Zero width means no module is mapped to this port. */
                if (!width)
                        continue;
                mlxsw_sp->port_to_module[i] = module;
                err = mlxsw_sp_port_create(mlxsw_sp, i, false,
                                           module, width, lane);
                if (err)
                        goto err_port_create;
        }
        return 0;

err_port_create:
err_port_module_info_get:
        /* Unwind only the ports actually created so far. */
        for (i--; i >= 1; i--)
                if (mlxsw_sp_port_created(mlxsw_sp, i))
                        mlxsw_sp_port_remove(mlxsw_sp, i);
        kfree(mlxsw_sp->port_to_module);
err_port_to_module_alloc:
        kfree(mlxsw_sp->ports);
        return err;
}
3352
3353 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
3354 {
3355         u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
3356
3357         return local_port - offset;
3358 }
3359
/* Create @count split ports starting at @base_port, all backed by the
 * same front-panel @module. Each child port receives an equal share of
 * the module's maximum width and the matching lane offset.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
                                      u8 module, unsigned int count)
{
        u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
        int err, i;

        for (i = 0; i < count; i++) {
                err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
                                           module, width, i * width);
                if (err)
                        goto err_port_create;
        }

        return 0;

err_port_create:
        /* Destroy the split ports created before the failure. */
        for (i--; i >= 0; i--)
                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
        return err;
}
3381
/* Re-create the original full-width ports after a split was undone or
 * a split attempt failed. Best-effort: the return value of
 * mlxsw_sp_port_create() is deliberately ignored since there is no
 * meaningful recovery at this point.
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
                                         u8 base_port, unsigned int count)
{
        u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
        int i;

        /* Split by four means we need to re-create two ports, otherwise
         * only one.
         */
        count = count / 2;

        for (i = 0; i < count; i++) {
                local_port = base_port + i * 2;
                /* A negative entry means no module is mapped there. */
                if (mlxsw_sp->port_to_module[local_port] < 0)
                        continue;
                module = mlxsw_sp->port_to_module[local_port];

                mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
                                     width, 0);
        }
}
3403
/* Devlink port-split handler: split a full-width port into 2 or 4
 * narrower ports. The neighboring ports whose lanes the split would
 * consume must not currently exist. On failure the original unsplit
 * port(s) are restored.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
                               unsigned int count,
                               struct netlink_ext_ack *extack)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        struct mlxsw_sp_port *mlxsw_sp_port;
        u8 module, cur_width, base_port;
        int i;
        int err;

        mlxsw_sp_port = mlxsw_sp->ports[local_port];
        if (!mlxsw_sp_port) {
                dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
                        local_port);
                NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
                return -EINVAL;
        }

        module = mlxsw_sp_port->mapping.module;
        cur_width = mlxsw_sp_port->mapping.width;

        if (count != 2 && count != 4) {
                netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
                NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports");
                return -EINVAL;
        }

        /* Only a port still at full width can be split. */
        if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
                netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
                NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
                return -EINVAL;
        }

        /* Make sure we have enough slave (even) ports for the split. */
        if (count == 2) {
                base_port = local_port;
                if (mlxsw_sp->ports[base_port + 1]) {
                        netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
                        NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
                        return -EINVAL;
                }
        } else {
                base_port = mlxsw_sp_cluster_base_port_get(local_port);
                if (mlxsw_sp->ports[base_port + 1] ||
                    mlxsw_sp->ports[base_port + 3]) {
                        netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
                        NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
                        return -EINVAL;
                }
        }

        /* Remove existing ports occupying the lanes of the split. */
        for (i = 0; i < count; i++)
                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

        err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
                goto err_port_split_create;
        }

        return 0;

err_port_split_create:
        /* Restore the original unsplit configuration. */
        mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
        return err;
}
3471
/* Devlink port-unsplit handler: remove the split siblings of
 * @local_port and re-create the original full-width port(s).
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
                                 struct netlink_ext_ack *extack)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        struct mlxsw_sp_port *mlxsw_sp_port;
        u8 cur_width, base_port;
        unsigned int count;
        int i;

        mlxsw_sp_port = mlxsw_sp->ports[local_port];
        if (!mlxsw_sp_port) {
                dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
                        local_port);
                NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
                return -EINVAL;
        }

        if (!mlxsw_sp_port->split) {
                netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
                NL_SET_ERR_MSG_MOD(extack, "Port was not split");
                return -EINVAL;
        }

        /* Width 1 implies a previous split by four, otherwise by two. */
        cur_width = mlxsw_sp_port->mapping.width;
        count = cur_width == 1 ? 4 : 2;

        base_port = mlxsw_sp_cluster_base_port_get(local_port);

        /* Determine which ports to remove. */
        if (count == 2 && local_port >= base_port + 2)
                base_port = base_port + 2;

        for (i = 0; i < count; i++)
                if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
                        mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

        mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

        return 0;
}
3512
3513 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
3514                                      char *pude_pl, void *priv)
3515 {
3516         struct mlxsw_sp *mlxsw_sp = priv;
3517         struct mlxsw_sp_port *mlxsw_sp_port;
3518         enum mlxsw_reg_pude_oper_status status;
3519         u8 local_port;
3520
3521         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
3522         mlxsw_sp_port = mlxsw_sp->ports[local_port];
3523         if (!mlxsw_sp_port)
3524                 return;
3525
3526         status = mlxsw_reg_pude_oper_status_get(pude_pl);
3527         if (status == MLXSW_PORT_OPER_STATUS_UP) {
3528                 netdev_info(mlxsw_sp_port->dev, "link up\n");
3529                 netif_carrier_on(mlxsw_sp_port->dev);
3530         } else {
3531                 netdev_info(mlxsw_sp_port->dev, "link down\n");
3532                 netif_carrier_off(mlxsw_sp_port->dev);
3533         }
3534 }
3535
/* Default RX trap handler: account the packet in per-CPU stats and
 * inject it into the network stack via the ingress port's netdev.
 */
static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
                                              u8 local_port, void *priv)
{
        struct mlxsw_sp *mlxsw_sp = priv;
        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
        struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

        if (unlikely(!mlxsw_sp_port)) {
                /* NOTE(review): the skb is not freed on this path —
                 * confirm ownership is handled by the caller.
                 */
                dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
                                     local_port);
                return;
        }

        skb->dev = mlxsw_sp_port->dev;

        pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
        u64_stats_update_begin(&pcpu_stats->syncp);
        pcpu_stats->rx_packets++;
        pcpu_stats->rx_bytes += skb->len;
        u64_stats_update_end(&pcpu_stats->syncp);

        /* eth_type_trans() reads skb->dev, which was set above. */
        skb->protocol = eth_type_trans(skb, skb->dev);
        netif_receive_skb(skb);
}
3560
3561 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
3562                                            void *priv)
3563 {
3564         skb->offload_fwd_mark = 1;
3565         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
3566 }
3567
3568 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
3569                                               u8 local_port, void *priv)
3570 {
3571         skb->offload_l3_fwd_mark = 1;
3572         skb->offload_fwd_mark = 1;
3573         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
3574 }
3575
/* RX handler for sampled packets (tc sample offload): report the
 * packet to the psample module, honoring the configured truncation.
 * The skb is always consumed here and never injected into the stack.
 */
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
                                             void *priv)
{
        struct mlxsw_sp *mlxsw_sp = priv;
        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
        struct psample_group *psample_group;
        u32 size;

        if (unlikely(!mlxsw_sp_port)) {
                dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
                                     local_port);
                goto out;
        }
        if (unlikely(!mlxsw_sp_port->sample)) {
                dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
                                     local_port);
                goto out;
        }

        /* Report only the truncated length when truncation is enabled. */
        size = mlxsw_sp_port->sample->truncate ?
                  mlxsw_sp_port->sample->trunc_size : skb->len;

        /* psample_group is RCU-protected; it may be cleared concurrently
         * while the sample action is being removed from the port.
         */
        rcu_read_lock();
        psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
        if (!psample_group)
                goto out_unlock;
        psample_sample_packet(psample_group, skb, size,
                              mlxsw_sp_port->dev->ifindex, 0,
                              mlxsw_sp_port->sample->rate);
out_unlock:
        rcu_read_unlock();
out:
        consume_skb(skb);
}
3610
/* Trap registration helpers. The _MARK variants route packets through
 * the handler that sets skb->offload_fwd_mark (and, for _L3_MARK, also
 * skb->offload_l3_fwd_mark) before handing them to the stack.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
        MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
                  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)     \
        MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,    \
                _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
        MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
                _is_ctrl, SP_##_trap_group, DISCARD)

/* Event listeners (e.g. PUDE) always use the SP_EVENT trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)                \
        MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
3625
/* Table of all traps and events registered by the driver, grouped by
 * layer. Each entry names the handler variant, trap ID, trap action,
 * trap group and whether the trap carries control-plane traffic.
 */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
        /* Events */
        MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
        /* L2 traps */
        MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
        MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
        MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
        MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
        MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
        MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
        MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
        MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
        MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
        MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
        MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
        MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
        MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
                          false),
        MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
                             false),
        MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
                             false),
        MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
                             false),
        /* L3 traps */
        MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
        MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
        MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
                          false),
        MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
                          false),
        MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
        MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
        MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
        MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
        MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
        MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
        MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
                          false),
        MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
                          false),
        MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
                          false),
        MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
                          false),
        MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
        MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
                          false),
        MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
        MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
        MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
        /* PKT Sample trap */
        MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
                  false, SP_IP2ME, DISCARD),
        /* ACL trap */
        MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
        /* Multicast Router Traps */
        MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
        MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
        MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
        MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
        MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
        /* NVE traps */
        MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
        MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
};
3702
/* Program a rate policer (QPCR register) for every CPU trap group so
 * trapped traffic cannot overwhelm the host CPU. Groups not listed in
 * the switch below are left untouched.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
        char qpcr_pl[MLXSW_REG_QPCR_LEN];
        enum mlxsw_reg_qpcr_ir_units ir_units;
        int max_cpu_policers;
        bool is_bytes;
        u8 burst_size;
        u32 rate;
        int i, err;

        if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
                return -EIO;

        max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

        ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
        for (i = 0; i < max_cpu_policers; i++) {
                /* Policers here are packet-based, not byte-based. */
                is_bytes = false;
                switch (i) {
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
                        rate = 128;
                        burst_size = 7;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
                        rate = 16 * 1024;
                        burst_size = 10;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
                        rate = 1024;
                        burst_size = 7;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
                        rate = 4 * 1024;
                        burst_size = 4;
                        break;
                default:
                        /* Unlisted policers keep their defaults. */
                        continue;
                }

                mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
                                    burst_size);
                err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
                if (err)
                        return err;
        }

        return 0;
}
3765
/* Bind each host trap group to a scheduling priority, a traffic class and
 * (except for the event group) the CPU policer with the same index as the
 * group. Groups not listed in the switch below are left untouched.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		/* By convention, the policer index equals the group index. */
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			/* Events are not rate limited: no policer is bound. */
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* A real policer index must exist in the device. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
3839
3840 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3841 {
3842         int i;
3843         int err;
3844
3845         err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3846         if (err)
3847                 return err;
3848
3849         err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
3850         if (err)
3851                 return err;
3852
3853         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3854                 err = mlxsw_core_trap_register(mlxsw_sp->core,
3855                                                &mlxsw_sp_listener[i],
3856                                                mlxsw_sp);
3857                 if (err)
3858                         goto err_listener_register;
3859
3860         }
3861         return 0;
3862
3863 err_listener_register:
3864         for (i--; i >= 0; i--) {
3865                 mlxsw_core_trap_unregister(mlxsw_sp->core,
3866                                            &mlxsw_sp_listener[i],
3867                                            mlxsw_sp);
3868         }
3869         return err;
3870 }
3871
3872 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3873 {
3874         int i;
3875
3876         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3877                 mlxsw_core_trap_unregister(mlxsw_sp->core,
3878                                            &mlxsw_sp_listener[i],
3879                                            mlxsw_sp);
3880         }
3881 }
3882
3883 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3884 {
3885         char slcr_pl[MLXSW_REG_SLCR_LEN];
3886         u32 seed;
3887         int err;
3888
3889         get_random_bytes(&seed, sizeof(seed));
3890         mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3891                                      MLXSW_REG_SLCR_LAG_HASH_DMAC |
3892                                      MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3893                                      MLXSW_REG_SLCR_LAG_HASH_VLANID |
3894                                      MLXSW_REG_SLCR_LAG_HASH_SIP |
3895                                      MLXSW_REG_SLCR_LAG_HASH_DIP |
3896                                      MLXSW_REG_SLCR_LAG_HASH_SPORT |
3897                                      MLXSW_REG_SLCR_LAG_HASH_DPORT |
3898                                      MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
3899         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3900         if (err)
3901                 return err;
3902
3903         if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3904             !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
3905                 return -EIO;
3906
3907         mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
3908                                  sizeof(struct mlxsw_sp_upper),
3909                                  GFP_KERNEL);
3910         if (!mlxsw_sp->lags)
3911                 return -ENOMEM;
3912
3913         return 0;
3914 }
3915
/* Free the per-LAG tracking array allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
3920
/* Configure only the EMAD trap group, with default priority and traffic
 * class and no policer bound. Used by the core as the minimal trap-group
 * setup; the full setup is done later by mlxsw_sp_trap_groups_set().
 */
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
3931
/* Forward declaration: defined later in this file, registered as the
 * netdevice notifier callback from mlxsw_sp_init() below.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);
3934
/* Common initialization path shared by Spectrum-1 and Spectrum-2 (the
 * per-ASIC init functions fill in the ops pointers first, then call this).
 * Sub-systems are brought up in dependency order; on any failure the
 * already-initialized sub-systems are torn down in reverse order via the
 * error labels at the bottom.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err)
		return err;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* Initialize netdevice notifier after router and SPAN is initialized,
	 * so that the event handler can use router structures and call SPAN
	 * respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

	/* Error unwind: mirror image of the init sequence above; must be kept
	 * in sync with mlxsw_sp_fini().
	 */
err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	return err;
}
4084
/* Spectrum-1 init: select the SP1 firmware requirement and the SP1-specific
 * operation tables, then run the common init path.
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
	mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}
4102
/* Spectrum-2 init: select the SP2-specific operation tables, then run the
 * common init path. Unlike SP1, no firmware requirement is set here.
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}
4118
/* Tear down everything mlxsw_sp_init() set up, in reverse order of
 * initialization. Must be kept in sync with the error-unwind path of
 * mlxsw_sp_init().
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
}
4139
/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
 * 802.1Q FIDs, so the table is sized for the maximum number of 802.1D FIDs
 * plus one entry per possible VLAN ID.
 */
#define MLXSW_SP_FID_FLOOD_TABLE_SIZE	(MLXSW_SP_FID_8021D_MAX + \
					 VLAN_VID_MASK - 1)
4145
/* Device configuration profile for Spectrum-1. Unlike the SP2 profile
 * below, it also configures the KVD split (linear/hash-single/hash-double
 * parts).
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
4169
/* Device configuration profile for Spectrum-2. Identical to the SP1
 * profile except that no KVD sizes are configured.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_FLOOD_TABLE_SIZE,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
4189
/* Fill in the devlink size parameters for the total KVD area and its three
 * sub-parts. Each sub-part may grow up to the total KVD size minus the
 * minimum sizes of the other two parts; all sizes are in KVD-granularity
 * entry units.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	/* The total KVD size is fixed: min == max == device KVD size. */
	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}
4225
/* Register the KVD resource tree with devlink: the total KVD area as the
 * top-level resource with linear, hash-double and hash-single children.
 * Default child sizes follow the split in the SP1 config profile.
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size, MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if  (err)
		return err;

	/* Split the hash area (KVD minus linear) between double and single
	 * according to the profile's parts ratio, rounded down to the KVD
	 * granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params);
	if (err)
		return err;

	return 0;
}
4291
/* Spectrum-1 devlink resources: currently only the KVD tree. */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_sp1_resources_kvd_register(mlxsw_core);
}
4296
/* Spectrum-2 registers no devlink resources. */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	return 0;
}
4301
/* Determine the sizes of the KVD linear, hash-double and hash-single
 * parts. User-configured sizes obtained via devlink take precedence; when
 * a devlink lookup fails, the default is derived from @profile. Returns
 * -EIO when the device resources are missing or the resulting sizes are
 * out of range.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplications of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}
4356
4357 static int
4358 mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
4359                                                union devlink_param_value val,
4360                                                struct netlink_ext_ack *extack)
4361 {
4362         if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
4363             (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
4364                 NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
4365                 return -EINVAL;
4366         }
4367
4368         return 0;
4369 }
4370
/* Devlink parameters exposed by the driver: currently only the generic
 * "fw_load_policy", settable at driverinit time.
 */
static const struct devlink_param mlxsw_sp_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL,
			      mlxsw_sp_devlink_param_fw_load_policy_validate),
};
4377
/* Register the driver's devlink parameters and set the driverinit default
 * of "fw_load_policy" to "driver".
 */
static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
				      ARRAY_SIZE(mlxsw_sp_devlink_params));
	if (err)
		return err;

	value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
					   value);
	return 0;
}
4395
4396 static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
4397 {
4398         devlink_params_unregister(priv_to_devlink(mlxsw_core),
4399                                   mlxsw_sp_devlink_params,
4400                                   ARRAY_SIZE(mlxsw_sp_devlink_params));
4401 }
4402
/* mlxsw driver callbacks for Spectrum-1 devices. */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.params_register		= mlxsw_sp_params_register,
	.params_unregister		= mlxsw_sp_params_unregister,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.res_query_enabled		= true,
};
4430
/* mlxsw driver callbacks for Spectrum-2 devices. Differs from the SP1
 * table in its init callback, resources_register callback, config profile,
 * and the absence of a kvd_sizes_get callback.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp_params_register,
	.params_unregister		= mlxsw_sp_params_unregister,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
};
4457
/* Return true if @dev is a mlxsw_sp port netdevice, identified by its
 * netdev_ops pointer.
 */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
4462
/* Lower-device walk callback: store the first mlxsw_sp port found in
 * *data and return non-zero to stop the walk.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;

	if (!mlxsw_sp_port_dev_check(lower_dev))
		return 0;

	*p_mlxsw_sp_port = netdev_priv(lower_dev);
	return 1;
}
4475
4476 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
4477 {
4478         struct mlxsw_sp_port *mlxsw_sp_port;
4479
4480         if (mlxsw_sp_port_dev_check(dev))
4481                 return netdev_priv(dev);
4482
4483         mlxsw_sp_port = NULL;
4484         netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
4485
4486         return mlxsw_sp_port;
4487 }
4488
4489 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
4490 {
4491         struct mlxsw_sp_port *mlxsw_sp_port;
4492
4493         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
4494         return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
4495 }
4496
4497 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
4498 {
4499         struct mlxsw_sp_port *mlxsw_sp_port;
4500
4501         if (mlxsw_sp_port_dev_check(dev))
4502                 return netdev_priv(dev);
4503
4504         mlxsw_sp_port = NULL;
4505         netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
4506                                       &mlxsw_sp_port);
4507
4508         return mlxsw_sp_port;
4509 }
4510
4511 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
4512 {
4513         struct mlxsw_sp_port *mlxsw_sp_port;
4514
4515         rcu_read_lock();
4516         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
4517         if (mlxsw_sp_port)
4518                 dev_hold(mlxsw_sp_port->dev);
4519         rcu_read_unlock();
4520         return mlxsw_sp_port;
4521 }
4522
/* Drop the netdevice reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
        dev_put(mlxsw_sp_port->dev);
}
4527
4528 static void
4529 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
4530                                  struct net_device *lag_dev)
4531 {
4532         struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
4533         struct net_device *upper_dev;
4534         struct list_head *iter;
4535
4536         if (netif_is_bridge_port(lag_dev))
4537                 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);
4538
4539         netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
4540                 if (!netif_is_bridge_port(upper_dev))
4541                         continue;
4542                 br_dev = netdev_master_upper_dev_get(upper_dev);
4543                 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
4544         }
4545 }
4546
4547 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4548 {
4549         char sldr_pl[MLXSW_REG_SLDR_LEN];
4550
4551         mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
4552         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4553 }
4554
4555 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
4556 {
4557         char sldr_pl[MLXSW_REG_SLDR_LEN];
4558
4559         mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
4560         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4561 }
4562
4563 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4564                                      u16 lag_id, u8 port_index)
4565 {
4566         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4567         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4568
4569         mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
4570                                       lag_id, port_index);
4571         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4572 }
4573
4574 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4575                                         u16 lag_id)
4576 {
4577         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4578         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4579
4580         mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
4581                                          lag_id);
4582         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4583 }
4584
4585 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
4586                                         u16 lag_id)
4587 {
4588         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4589         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4590
4591         mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
4592                                         lag_id);
4593         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4594 }
4595
4596 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
4597                                          u16 lag_id)
4598 {
4599         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4600         char slcor_pl[MLXSW_REG_SLCOR_LEN];
4601
4602         mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
4603                                          lag_id);
4604         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4605 }
4606
4607 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4608                                   struct net_device *lag_dev,
4609                                   u16 *p_lag_id)
4610 {
4611         struct mlxsw_sp_upper *lag;
4612         int free_lag_id = -1;
4613         u64 max_lag;
4614         int i;
4615
4616         max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
4617         for (i = 0; i < max_lag; i++) {
4618                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
4619                 if (lag->ref_count) {
4620                         if (lag->dev == lag_dev) {
4621                                 *p_lag_id = i;
4622                                 return 0;
4623                         }
4624                 } else if (free_lag_id < 0) {
4625                         free_lag_id = i;
4626                 }
4627         }
4628         if (free_lag_id < 0)
4629                 return -EBUSY;
4630         *p_lag_id = free_lag_id;
4631         return 0;
4632 }
4633
4634 static bool
4635 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
4636                           struct net_device *lag_dev,
4637                           struct netdev_lag_upper_info *lag_upper_info,
4638                           struct netlink_ext_ack *extack)
4639 {
4640         u16 lag_id;
4641
4642         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
4643                 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
4644                 return false;
4645         }
4646         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
4647                 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
4648                 return false;
4649         }
4650         return true;
4651 }
4652
4653 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4654                                        u16 lag_id, u8 *p_port_index)
4655 {
4656         u64 max_lag_members;
4657         int i;
4658
4659         max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
4660                                              MAX_LAG_MEMBERS);
4661         for (i = 0; i < max_lag_members; i++) {
4662                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
4663                         *p_port_index = i;
4664                         return 0;
4665                 }
4666         }
4667         return -EBUSY;
4668 }
4669
/* Enslave @mlxsw_sp_port to the LAG netdevice @lag_dev in hardware:
 * resolve (or allocate) the LAG index, create the LAG on first use,
 * map the port into a free member slot and enable its collector.
 * On failure the steps already taken are unwound in reverse order.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
                                  struct net_device *lag_dev)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_upper *lag;
        u16 lag_id;
        u8 port_index;
        int err;

        err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
        if (err)
                return err;
        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
        /* First port joining this LAG: instantiate it in the device. */
        if (!lag->ref_count) {
                err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
                if (err)
                        return err;
                lag->dev = lag_dev;
        }

        err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
        if (err)
                return err;
        err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
        if (err)
                goto err_col_port_add;
        err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
        if (err)
                goto err_col_port_enable;

        mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
                                   mlxsw_sp_port->local_port);
        mlxsw_sp_port->lag_id = lag_id;
        mlxsw_sp_port->lagged = 1;
        lag->ref_count++;

        /* Port is no longer usable as a router interface */
        if (mlxsw_sp_port->default_vlan->fid)
                mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

        return 0;

err_col_port_enable:
        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
        /* ref_count is still zero if this port was creating the LAG. */
        if (!lag->ref_count)
                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
        return err;
}
4719
/* Release @mlxsw_sp_port from LAG @lag_dev: disable and unmap the
 * collector, flush port VLANs, detach LAG uppers from their bridges
 * and destroy the LAG in hardware when this was its last member.
 * No-op if the port is not currently lagged.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
                                    struct net_device *lag_dev)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 lag_id = mlxsw_sp_port->lag_id;
        struct mlxsw_sp_upper *lag;

        if (!mlxsw_sp_port->lagged)
                return;
        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
        WARN_ON(lag->ref_count == 0);

        mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

        /* Any VLANs configured on the port are no longer valid */
        mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
        mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
        /* Make the LAG and its directly linked uppers leave bridges they
         * are member in
         */
        mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

        /* Last member: remove the LAG from the device as well. */
        if (lag->ref_count == 1)
                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

        mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
                                     mlxsw_sp_port->local_port);
        mlxsw_sp_port->lagged = 0;
        lag->ref_count--;

        /* Make sure untagged frames are allowed to ingress */
        mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}
4754
4755 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4756                                       u16 lag_id)
4757 {
4758         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4759         char sldr_pl[MLXSW_REG_SLDR_LEN];
4760
4761         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4762                                          mlxsw_sp_port->local_port);
4763         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4764 }
4765
4766 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4767                                          u16 lag_id)
4768 {
4769         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4770         char sldr_pl[MLXSW_REG_SLDR_LEN];
4771
4772         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4773                                             mlxsw_sp_port->local_port);
4774         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4775 }
4776
4777 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
4778                                        bool lag_tx_enabled)
4779 {
4780         if (lag_tx_enabled)
4781                 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
4782                                                   mlxsw_sp_port->lag_id);
4783         else
4784                 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4785                                                      mlxsw_sp_port->lag_id);
4786 }
4787
/* Apply a CHANGELOWERSTATE notification for a lagged port; only the
 * tx_enabled bit of the lower state is reflected to the device.
 */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
                                     struct netdev_lag_lower_state_info *info)
{
        return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
4793
4794 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4795                                  bool enable)
4796 {
4797         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4798         enum mlxsw_reg_spms_state spms_state;
4799         char *spms_pl;
4800         u16 vid;
4801         int err;
4802
4803         spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4804                               MLXSW_REG_SPMS_STATE_DISCARDING;
4805
4806         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4807         if (!spms_pl)
4808                 return -ENOMEM;
4809         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4810
4811         for (vid = 0; vid < VLAN_N_VID; vid++)
4812                 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4813
4814         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4815         kfree(spms_pl);
4816         return err;
4817 }
4818
/* Prepare the port for enslavement to an OVS master: switch to virtual
 * port mode, force STP forwarding, make all VLANs members that egress
 * tagged, and disable learning on each VID. Failures unwind in reverse.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
        u16 vid = 1;
        int err;

        err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
        if (err)
                return err;
        err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
        if (err)
                goto err_port_stp_set;
        err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
                                     true, false);
        if (err)
                goto err_port_vlan_set;

        for (; vid <= VLAN_N_VID - 1; vid++) {
                err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
                                                     vid, false);
                if (err)
                        goto err_vid_learning_set;
        }

        return 0;

err_vid_learning_set:
        /* Re-enable learning only for the VIDs already disabled above. */
        for (vid--; vid >= 1; vid--)
                mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
        mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
        mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
        return err;
}
4853
4854 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4855 {
4856         u16 vid;
4857
4858         for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4859                 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4860                                                vid, true);
4861
4862         mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4863                                false, false);
4864         mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4865         mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4866 }
4867
4868 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
4869 {
4870         unsigned int num_vxlans = 0;
4871         struct net_device *dev;
4872         struct list_head *iter;
4873
4874         netdev_for_each_lower_dev(br_dev, dev, iter) {
4875                 if (netif_is_vxlan(dev))
4876                         num_vxlans++;
4877         }
4878
4879         return num_vxlans > 1;
4880 }
4881
4882 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
4883 {
4884         DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
4885         struct net_device *dev;
4886         struct list_head *iter;
4887
4888         netdev_for_each_lower_dev(br_dev, dev, iter) {
4889                 u16 pvid;
4890                 int err;
4891
4892                 if (!netif_is_vxlan(dev))
4893                         continue;
4894
4895                 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
4896                 if (err || !pvid)
4897                         continue;
4898
4899                 if (test_and_set_bit(pvid, vlans))
4900                         return false;
4901         }
4902
4903         return true;
4904 }
4905
4906 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
4907                                            struct netlink_ext_ack *extack)
4908 {
4909         if (br_multicast_enabled(br_dev)) {
4910                 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
4911                 return false;
4912         }
4913
4914         if (!br_vlan_enabled(br_dev) &&
4915             mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
4916                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
4917                 return false;
4918         }
4919
4920         if (br_vlan_enabled(br_dev) &&
4921             !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
4922                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
4923                 return false;
4924         }
4925
4926         return true;
4927 }
4928
/* Handle PRECHANGEUPPER/CHANGEUPPER notifications for a port netdevice.
 * PRECHANGEUPPER validates the requested topology and may veto it with
 * an extack message; CHANGEUPPER commits the already-validated change
 * to the device (bridge/LAG/OVS join or leave). @lower_dev is the
 * netdevice the event was originally reported on (the port itself or
 * one of its uppers); @dev is the underlying port netdevice.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
                                               struct net_device *dev,
                                               unsigned long event, void *ptr)
{
        struct netdev_notifier_changeupper_info *info;
        struct mlxsw_sp_port *mlxsw_sp_port;
        struct netlink_ext_ack *extack;
        struct net_device *upper_dev;
        struct mlxsw_sp *mlxsw_sp;
        int err = 0;

        mlxsw_sp_port = netdev_priv(dev);
        mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        info = ptr;
        extack = netdev_notifier_info_to_extack(&info->info);

        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                upper_dev = info->upper_dev;
                /* Only VLAN, LAG, bridge, OVS and macvlan uppers are
                 * offloadable on top of a port.
                 */
                if (!is_vlan_dev(upper_dev) &&
                    !netif_is_lag_master(upper_dev) &&
                    !netif_is_bridge_master(upper_dev) &&
                    !netif_is_ovs_master(upper_dev) &&
                    !netif_is_macvlan(upper_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
                        return -EINVAL;
                }
                /* Unlinking needs no validation. */
                if (!info->linking)
                        break;
                if (netif_is_bridge_master(upper_dev) &&
                    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
                    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
                    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
                        return -EOPNOTSUPP;
                if (netdev_has_any_upper_dev(upper_dev) &&
                    (!netif_is_bridge_master(upper_dev) ||
                     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
                                                          upper_dev))) {
                        NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
                        return -EINVAL;
                }
                if (netif_is_lag_master(upper_dev) &&
                    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
                                               info->upper_info, extack))
                        return -EINVAL;
                if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
                        return -EINVAL;
                }
                if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
                    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
                        NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
                        return -EINVAL;
                }
                if (netif_is_macvlan(upper_dev) &&
                    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
                        return -EOPNOTSUPP;
                }
                if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
                        return -EINVAL;
                }
                if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
                        return -EINVAL;
                }
                break;
        case NETDEV_CHANGEUPPER:
                upper_dev = info->upper_dev;
                if (netif_is_bridge_master(upper_dev)) {
                        if (info->linking)
                                err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
                                                                lower_dev,
                                                                upper_dev,
                                                                extack);
                        else
                                mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
                                                           lower_dev,
                                                           upper_dev);
                } else if (netif_is_lag_master(upper_dev)) {
                        if (info->linking) {
                                err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
                                                             upper_dev);
                        } else {
                                mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port,
                                                            false);
                                mlxsw_sp_port_lag_leave(mlxsw_sp_port,
                                                        upper_dev);
                        }
                } else if (netif_is_ovs_master(upper_dev)) {
                        if (info->linking)
                                err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
                        else
                                mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
                } else if (netif_is_macvlan(upper_dev)) {
                        if (!info->linking)
                                mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
                } else if (is_vlan_dev(upper_dev)) {
                        struct net_device *br_dev;

                        /* Only the unlink of a bridged VLAN upper needs
                         * handling here.
                         */
                        if (!netif_is_bridge_port(upper_dev))
                                break;
                        if (info->linking)
                                break;
                        br_dev = netdev_master_upper_dev_get(upper_dev);
                        mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
                                                   br_dev);
                }
                break;
        }

        return err;
}
5043
5044 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
5045                                                unsigned long event, void *ptr)
5046 {
5047         struct netdev_notifier_changelowerstate_info *info;
5048         struct mlxsw_sp_port *mlxsw_sp_port;
5049         int err;
5050
5051         mlxsw_sp_port = netdev_priv(dev);
5052         info = ptr;
5053
5054         switch (event) {
5055         case NETDEV_CHANGELOWERSTATE:
5056                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
5057                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
5058                                                         info->lower_state_info);
5059                         if (err)
5060                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
5061                 }
5062                 break;
5063         }
5064
5065         return 0;
5066 }
5067
5068 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
5069                                          struct net_device *port_dev,
5070                                          unsigned long event, void *ptr)
5071 {
5072         switch (event) {
5073         case NETDEV_PRECHANGEUPPER:
5074         case NETDEV_CHANGEUPPER:
5075                 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
5076                                                            event, ptr);
5077         case NETDEV_CHANGELOWERSTATE:
5078                 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
5079                                                            ptr);
5080         }
5081
5082         return 0;
5083 }
5084
5085 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
5086                                         unsigned long event, void *ptr)
5087 {
5088         struct net_device *dev;
5089         struct list_head *iter;
5090         int ret;
5091
5092         netdev_for_each_lower_dev(lag_dev, dev, iter) {
5093                 if (mlxsw_sp_port_dev_check(dev)) {
5094                         ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
5095                                                             ptr);
5096                         if (ret)
5097                                 return ret;
5098                 }
5099         }
5100
5101         return 0;
5102 }
5103
/* Handle PRECHANGEUPPER/CHANGEUPPER for a VLAN device (@vlan_dev, VID
 * @vid) stacked on top of the port netdevice @dev: validate the upper
 * on PRECHANGEUPPER and commit bridge join/leave or macvlan removal on
 * CHANGEUPPER. Only bridge and macvlan uppers are supported here.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
                                              struct net_device *dev,
                                              unsigned long event, void *ptr,
                                              u16 vid)
{
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct netdev_notifier_changeupper_info *info = ptr;
        struct netlink_ext_ack *extack;
        struct net_device *upper_dev;
        int err = 0;

        extack = netdev_notifier_info_to_extack(&info->info);

        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                upper_dev = info->upper_dev;
                if (!netif_is_bridge_master(upper_dev) &&
                    !netif_is_macvlan(upper_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
                        return -EINVAL;
                }
                /* Unlinking needs no validation. */
                if (!info->linking)
                        break;
                if (netif_is_bridge_master(upper_dev) &&
                    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
                    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
                    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
                        return -EOPNOTSUPP;
                if (netdev_has_any_upper_dev(upper_dev) &&
                    (!netif_is_bridge_master(upper_dev) ||
                     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
                                                          upper_dev))) {
                        NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
                        return -EINVAL;
                }
                if (netif_is_macvlan(upper_dev) &&
                    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
                        return -EOPNOTSUPP;
                }
                break;
        case NETDEV_CHANGEUPPER:
                upper_dev = info->upper_dev;
                if (netif_is_bridge_master(upper_dev)) {
                        if (info->linking)
                                err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
                                                                vlan_dev,
                                                                upper_dev,
                                                                extack);
                        else
                                mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
                                                           vlan_dev,
                                                           upper_dev);
                } else if (netif_is_macvlan(upper_dev)) {
                        if (!info->linking)
                                mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
                } else {
                        /* PRECHANGEUPPER should have vetoed anything else. */
                        err = -EINVAL;
                        WARN_ON(1);
                }
                break;
        }

        return err;
}
5170
5171 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
5172                                                   struct net_device *lag_dev,
5173                                                   unsigned long event,
5174                                                   void *ptr, u16 vid)
5175 {
5176         struct net_device *dev;
5177         struct list_head *iter;
5178         int ret;
5179
5180         netdev_for_each_lower_dev(lag_dev, dev, iter) {
5181                 if (mlxsw_sp_port_dev_check(dev)) {
5182                         ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
5183                                                                  event, ptr,
5184                                                                  vid);
5185                         if (ret)
5186                                 return ret;
5187                 }
5188         }
5189
5190         return 0;
5191 }
5192
5193 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
5194                                                 struct net_device *br_dev,
5195                                                 unsigned long event, void *ptr,
5196                                                 u16 vid)
5197 {
5198         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
5199         struct netdev_notifier_changeupper_info *info = ptr;
5200         struct netlink_ext_ack *extack;
5201         struct net_device *upper_dev;
5202
5203         if (!mlxsw_sp)
5204                 return 0;
5205
5206         extack = netdev_notifier_info_to_extack(&info->info);
5207
5208         switch (event) {
5209         case NETDEV_PRECHANGEUPPER:
5210                 upper_dev = info->upper_dev;
5211                 if (!netif_is_macvlan(upper_dev)) {
5212                         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5213                         return -EOPNOTSUPP;
5214                 }
5215                 if (!info->linking)
5216                         break;
5217                 if (netif_is_macvlan(upper_dev) &&
5218                     !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
5219                         NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
5220                         return -EOPNOTSUPP;
5221                 }
5222                 break;
5223         case NETDEV_CHANGEUPPER:
5224                 upper_dev = info->upper_dev;
5225                 if (info->linking)
5226                         break;
5227                 if (netif_is_macvlan(upper_dev))
5228                         mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5229                 break;
5230         }
5231
5232         return 0;
5233 }
5234
5235 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
5236                                          unsigned long event, void *ptr)
5237 {
5238         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
5239         u16 vid = vlan_dev_vlan_id(vlan_dev);
5240
5241         if (mlxsw_sp_port_dev_check(real_dev))
5242                 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
5243                                                           event, ptr, vid);
5244         else if (netif_is_lag_master(real_dev))
5245                 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
5246                                                               real_dev, event,
5247                                                               ptr, vid);
5248         else if (netif_is_bridge_master(real_dev))
5249                 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
5250                                                             event, ptr, vid);
5251
5252         return 0;
5253 }
5254
5255 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
5256                                            unsigned long event, void *ptr)
5257 {
5258         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
5259         struct netdev_notifier_changeupper_info *info = ptr;
5260         struct netlink_ext_ack *extack;
5261         struct net_device *upper_dev;
5262
5263         if (!mlxsw_sp)
5264                 return 0;
5265
5266         extack = netdev_notifier_info_to_extack(&info->info);
5267
5268         switch (event) {
5269         case NETDEV_PRECHANGEUPPER:
5270                 upper_dev = info->upper_dev;
5271                 if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
5272                         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5273                         return -EOPNOTSUPP;
5274                 }
5275                 if (!info->linking)
5276                         break;
5277                 if (netif_is_macvlan(upper_dev) &&
5278                     !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
5279                         NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
5280                         return -EOPNOTSUPP;
5281                 }
5282                 break;
5283         case NETDEV_CHANGEUPPER:
5284                 upper_dev = info->upper_dev;
5285                 if (info->linking)
5286                         break;
5287                 if (is_vlan_dev(upper_dev))
5288                         mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
5289                 if (netif_is_macvlan(upper_dev))
5290                         mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5291                 break;
5292         }
5293
5294         return 0;
5295 }
5296
5297 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
5298                                             unsigned long event, void *ptr)
5299 {
5300         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
5301         struct netdev_notifier_changeupper_info *info = ptr;
5302         struct netlink_ext_ack *extack;
5303
5304         if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
5305                 return 0;
5306
5307         extack = netdev_notifier_info_to_extack(&info->info);
5308
5309         /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
5310         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5311
5312         return -EOPNOTSUPP;
5313 }
5314
5315 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
5316 {
5317         struct netdev_notifier_changeupper_info *info = ptr;
5318
5319         if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
5320                 return false;
5321         return netif_is_l3_master(info->upper_dev);
5322 }
5323
/* Handle netdev events on a VxLAN device that affect its membership in
 * an offloaded bridge: enslavement/release (NETDEV_CHANGEUPPER) and the
 * device going administratively up/down while enslaved (NETDEV_PRE_UP /
 * NETDEV_DOWN). Returns a negative errno to veto, 0 otherwise.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		/* ptr is the embedded 'info' member, so the full
		 * changeupper payload is recovered via container_of().
		 */
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Bridge not backed by this driver - nothing to offload. */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* Join is deferred to NETDEV_PRE_UP for devices
			 * that are not yet running.
			 */
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		/* Device is coming up; join the bridge it is already
		 * enslaved to, if any and if offloaded by this driver.
		 */
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		/* Mirror image of NETDEV_PRE_UP: leave the offloaded
		 * bridge when the device goes down.
		 */
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}
5391
/* Top-level netdevice notifier callback for a mlxsw_sp instance.
 * Refreshes mirroring (SPAN) state on every event, then dispatches the
 * event to exactly one device-type specific handler. The handlers'
 * negative errno (if any) is translated for the notifier chain.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	/* Invalidate any SPAN entry bound to a disappearing netdev
	 * before re-resolving the remaining entries below.
	 */
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	/* NOTE(review): this first check is a plain 'if', not part of the
	 * 'else if' chain, so a VxLAN device also falls through to the
	 * checks below; presumably a VxLAN device never matches the ipip
	 * predicates, so 'err' is not overwritten - confirm.
	 */
	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
5435
/* Validator notifiers: veto invalid IPv4/IPv6 address configuration
 * before it is committed (registered in mlxsw_sp_module_init()).
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

/* PCI IDs and driver for Spectrum-1 ASICs. */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

/* PCI IDs and driver for Spectrum-2 ASICs. */
static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};
5463
5464 static int __init mlxsw_sp_module_init(void)
5465 {
5466         int err;
5467
5468         register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
5469         register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
5470
5471         err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
5472         if (err)
5473                 goto err_sp1_core_driver_register;
5474
5475         err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
5476         if (err)
5477                 goto err_sp2_core_driver_register;
5478
5479         err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
5480         if (err)
5481                 goto err_sp1_pci_driver_register;
5482
5483         err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
5484         if (err)
5485                 goto err_sp2_pci_driver_register;
5486
5487         return 0;
5488
5489 err_sp2_pci_driver_register:
5490         mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
5491 err_sp1_pci_driver_register:
5492         mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
5493 err_sp2_core_driver_register:
5494         mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
5495 err_sp1_core_driver_register:
5496         unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
5497         unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
5498         return err;
5499 }
5500
/* Module exit: tear everything down in exact reverse order of
 * mlxsw_sp_module_init() - PCI drivers, core drivers, then the
 * address-validator notifiers.
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
5510
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

/* Module metadata: license, authorship, hotplug device tables for both
 * Spectrum generations, and the SP1 firmware image dependency.
 */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);