qed/qede: Add setter APIs support for RX flow classification
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1 /* QLogic qede NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and /or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/version.h>
33 #include <linux/types.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/string.h>
38 #include <linux/pci.h>
39 #include <linux/capability.h>
40 #include <linux/vmalloc.h>
41 #include "qede.h"
42 #include "qede_ptp.h"
43
44 #define QEDE_RQSTAT_OFFSET(stat_name) \
45          (offsetof(struct qede_rx_queue, stat_name))
46 #define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
47 #define QEDE_RQSTAT(stat_name) \
48          {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
49
50 #define QEDE_SELFTEST_POLL_COUNT 100
51
52 static const struct {
53         u64 offset;
54         char string[ETH_GSTRING_LEN];
55 } qede_rqstats_arr[] = {
56         QEDE_RQSTAT(rcv_pkts),
57         QEDE_RQSTAT(rx_hw_errors),
58         QEDE_RQSTAT(rx_alloc_errors),
59         QEDE_RQSTAT(rx_ip_frags),
60         QEDE_RQSTAT(xdp_no_pass),
61 };
62
63 #define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
64 #define QEDE_TQSTAT_OFFSET(stat_name) \
65         (offsetof(struct qede_tx_queue, stat_name))
66 #define QEDE_TQSTAT_STRING(stat_name) (#stat_name)
67 #define QEDE_TQSTAT(stat_name) \
68         {QEDE_TQSTAT_OFFSET(stat_name), QEDE_TQSTAT_STRING(stat_name)}
69 #define QEDE_NUM_TQSTATS ARRAY_SIZE(qede_tqstats_arr)
70 static const struct {
71         u64 offset;
72         char string[ETH_GSTRING_LEN];
73 } qede_tqstats_arr[] = {
74         QEDE_TQSTAT(xmit_pkts),
75         QEDE_TQSTAT(stopped_cnt),
76 };
77
78 #define QEDE_STAT_OFFSET(stat_name, type, base) \
79         (offsetof(type, stat_name) + (base))
80 #define QEDE_STAT_STRING(stat_name)     (#stat_name)
81 #define _QEDE_STAT(stat_name, type, base, attr) \
82         {QEDE_STAT_OFFSET(stat_name, type, base), \
83          QEDE_STAT_STRING(stat_name), \
84          attr}
85 #define QEDE_STAT(stat_name) \
86         _QEDE_STAT(stat_name, struct qede_stats_common, 0, 0x0)
87 #define QEDE_PF_STAT(stat_name) \
88         _QEDE_STAT(stat_name, struct qede_stats_common, 0, \
89                    BIT(QEDE_STAT_PF_ONLY))
90 #define QEDE_PF_BB_STAT(stat_name) \
91         _QEDE_STAT(stat_name, struct qede_stats_bb, \
92                    offsetof(struct qede_stats, bb), \
93                    BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_BB_ONLY))
94 #define QEDE_PF_AH_STAT(stat_name) \
95         _QEDE_STAT(stat_name, struct qede_stats_ah, \
96                    offsetof(struct qede_stats, ah), \
97                    BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_AH_ONLY))
98 static const struct {
99         u64 offset;
100         char string[ETH_GSTRING_LEN];
101         unsigned long attr;
102 #define QEDE_STAT_PF_ONLY       0
103 #define QEDE_STAT_BB_ONLY       1
104 #define QEDE_STAT_AH_ONLY       2
105 } qede_stats_arr[] = {
106         QEDE_STAT(rx_ucast_bytes),
107         QEDE_STAT(rx_mcast_bytes),
108         QEDE_STAT(rx_bcast_bytes),
109         QEDE_STAT(rx_ucast_pkts),
110         QEDE_STAT(rx_mcast_pkts),
111         QEDE_STAT(rx_bcast_pkts),
112
113         QEDE_STAT(tx_ucast_bytes),
114         QEDE_STAT(tx_mcast_bytes),
115         QEDE_STAT(tx_bcast_bytes),
116         QEDE_STAT(tx_ucast_pkts),
117         QEDE_STAT(tx_mcast_pkts),
118         QEDE_STAT(tx_bcast_pkts),
119
120         QEDE_PF_STAT(rx_64_byte_packets),
121         QEDE_PF_STAT(rx_65_to_127_byte_packets),
122         QEDE_PF_STAT(rx_128_to_255_byte_packets),
123         QEDE_PF_STAT(rx_256_to_511_byte_packets),
124         QEDE_PF_STAT(rx_512_to_1023_byte_packets),
125         QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
126         QEDE_PF_BB_STAT(rx_1519_to_1522_byte_packets),
127         QEDE_PF_BB_STAT(rx_1519_to_2047_byte_packets),
128         QEDE_PF_BB_STAT(rx_2048_to_4095_byte_packets),
129         QEDE_PF_BB_STAT(rx_4096_to_9216_byte_packets),
130         QEDE_PF_BB_STAT(rx_9217_to_16383_byte_packets),
131         QEDE_PF_AH_STAT(rx_1519_to_max_byte_packets),
132         QEDE_PF_STAT(tx_64_byte_packets),
133         QEDE_PF_STAT(tx_65_to_127_byte_packets),
134         QEDE_PF_STAT(tx_128_to_255_byte_packets),
135         QEDE_PF_STAT(tx_256_to_511_byte_packets),
136         QEDE_PF_STAT(tx_512_to_1023_byte_packets),
137         QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
138         QEDE_PF_BB_STAT(tx_1519_to_2047_byte_packets),
139         QEDE_PF_BB_STAT(tx_2048_to_4095_byte_packets),
140         QEDE_PF_BB_STAT(tx_4096_to_9216_byte_packets),
141         QEDE_PF_BB_STAT(tx_9217_to_16383_byte_packets),
142         QEDE_PF_AH_STAT(tx_1519_to_max_byte_packets),
143         QEDE_PF_STAT(rx_mac_crtl_frames),
144         QEDE_PF_STAT(tx_mac_ctrl_frames),
145         QEDE_PF_STAT(rx_pause_frames),
146         QEDE_PF_STAT(tx_pause_frames),
147         QEDE_PF_STAT(rx_pfc_frames),
148         QEDE_PF_STAT(tx_pfc_frames),
149
150         QEDE_PF_STAT(rx_crc_errors),
151         QEDE_PF_STAT(rx_align_errors),
152         QEDE_PF_STAT(rx_carrier_errors),
153         QEDE_PF_STAT(rx_oversize_packets),
154         QEDE_PF_STAT(rx_jabbers),
155         QEDE_PF_STAT(rx_undersize_packets),
156         QEDE_PF_STAT(rx_fragments),
157         QEDE_PF_BB_STAT(tx_lpi_entry_count),
158         QEDE_PF_BB_STAT(tx_total_collisions),
159         QEDE_PF_STAT(brb_truncates),
160         QEDE_PF_STAT(brb_discards),
161         QEDE_STAT(no_buff_discards),
162         QEDE_PF_STAT(mftag_filter_discards),
163         QEDE_PF_STAT(mac_filter_discards),
164         QEDE_STAT(tx_err_drop_pkts),
165         QEDE_STAT(ttl0_discard),
166         QEDE_STAT(packet_too_big_discard),
167
168         QEDE_STAT(coalesced_pkts),
169         QEDE_STAT(coalesced_events),
170         QEDE_STAT(coalesced_aborts_num),
171         QEDE_STAT(non_coalesced_pkts),
172         QEDE_STAT(coalesced_bytes),
173 };
174
175 #define QEDE_NUM_STATS  ARRAY_SIZE(qede_stats_arr)
176 #define QEDE_STAT_IS_PF_ONLY(i) \
177         test_bit(QEDE_STAT_PF_ONLY, &qede_stats_arr[i].attr)
178 #define QEDE_STAT_IS_BB_ONLY(i) \
179         test_bit(QEDE_STAT_BB_ONLY, &qede_stats_arr[i].attr)
180 #define QEDE_STAT_IS_AH_ONLY(i) \
181         test_bit(QEDE_STAT_AH_ONLY, &qede_stats_arr[i].attr)
182
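For reference, the QEDE_RQSTAT/QEDE_TQSTAT/QEDE_STAT macros above all build the same thing: a table of {byte offset, name} pairs generated with offsetof(), which is later walked to pull each u64 counter straight out of the owning structure. The following is a minimal, self-contained sketch of that pattern only; the struct and field names are illustrative and not part of the driver.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical counter block, standing in for qede_rx_queue/qede_stats. */
struct demo_stats {
	uint64_t rcv_pkts;
	uint64_t rx_hw_errors;
	uint64_t rx_alloc_errors;
};

/* Same {offset, name} table shape the QEDE_*STAT() macros produce. */
static const struct {
	size_t offset;
	const char *string;
} demo_stats_arr[] = {
	{ offsetof(struct demo_stats, rcv_pkts),        "rcv_pkts" },
	{ offsetof(struct demo_stats, rx_hw_errors),    "rx_hw_errors" },
	{ offsetof(struct demo_stats, rx_alloc_errors), "rx_alloc_errors" },
};

int main(void)
{
	struct demo_stats s = { .rcv_pkts = 42, .rx_hw_errors = 1 };
	size_t i;

	/* Read each counter through its byte offset, as the driver does. */
	for (i = 0; i < sizeof(demo_stats_arr) / sizeof(demo_stats_arr[0]); i++)
		printf("%-20s %llu\n", demo_stats_arr[i].string,
		       (unsigned long long)*(const uint64_t *)
		       ((const char *)&s + demo_stats_arr[i].offset));

	return 0;
}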
183 enum {
184         QEDE_PRI_FLAG_CMT,
185         QEDE_PRI_FLAG_LEN,
186 };
187
188 static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
189         "Coupled-Function",
190 };
191
192 enum qede_ethtool_tests {
193         QEDE_ETHTOOL_INT_LOOPBACK,
194         QEDE_ETHTOOL_INTERRUPT_TEST,
195         QEDE_ETHTOOL_MEMORY_TEST,
196         QEDE_ETHTOOL_REGISTER_TEST,
197         QEDE_ETHTOOL_CLOCK_TEST,
198         QEDE_ETHTOOL_NVRAM_TEST,
199         QEDE_ETHTOOL_TEST_MAX
200 };
201
202 static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
203         "Internal loopback (offline)",
204         "Interrupt (online)\t",
205         "Memory (online)\t\t",
206         "Register (online)\t",
207         "Clock (online)\t\t",
208         "Nvram (online)\t\t",
209 };
210
211 static void qede_get_strings_stats_txq(struct qede_dev *edev,
212                                        struct qede_tx_queue *txq, u8 **buf)
213 {
214         int i;
215
216         for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
217                 if (txq->is_xdp)
218                         sprintf(*buf, "%d [XDP]: %s",
219                                 QEDE_TXQ_XDP_TO_IDX(edev, txq),
220                                 qede_tqstats_arr[i].string);
221                 else
222                         sprintf(*buf, "%d: %s", txq->index,
223                                 qede_tqstats_arr[i].string);
224                 *buf += ETH_GSTRING_LEN;
225         }
226 }
227
228 static void qede_get_strings_stats_rxq(struct qede_dev *edev,
229                                        struct qede_rx_queue *rxq, u8 **buf)
230 {
231         int i;
232
233         for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
234                 sprintf(*buf, "%d: %s", rxq->rxq_id,
235                         qede_rqstats_arr[i].string);
236                 *buf += ETH_GSTRING_LEN;
237         }
238 }
239
240 static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index)
241 {
242         return (IS_VF(edev) && QEDE_STAT_IS_PF_ONLY(stat_index)) ||
243                (QEDE_IS_BB(edev) && QEDE_STAT_IS_AH_ONLY(stat_index)) ||
244                (QEDE_IS_AH(edev) && QEDE_STAT_IS_BB_ONLY(stat_index));
245 }
246
247 static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
248 {
249         struct qede_fastpath *fp;
250         int i;
251
252         /* Account for queue statistics */
253         for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
254                 fp = &edev->fp_array[i];
255
256                 if (fp->type & QEDE_FASTPATH_RX)
257                         qede_get_strings_stats_rxq(edev, fp->rxq, &buf);
258
259                 if (fp->type & QEDE_FASTPATH_XDP)
260                         qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);
261
262                 if (fp->type & QEDE_FASTPATH_TX)
263                         qede_get_strings_stats_txq(edev, fp->txq, &buf);
264         }
265
266         /* Account for non-queue statistics */
267         for (i = 0; i < QEDE_NUM_STATS; i++) {
268                 if (qede_is_irrelevant_stat(edev, i))
269                         continue;
270                 strcpy(buf, qede_stats_arr[i].string);
271                 buf += ETH_GSTRING_LEN;
272         }
273 }
274
275 static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
276 {
277         struct qede_dev *edev = netdev_priv(dev);
278
279         switch (stringset) {
280         case ETH_SS_STATS:
281                 qede_get_strings_stats(edev, buf);
282                 break;
283         case ETH_SS_PRIV_FLAGS:
284                 memcpy(buf, qede_private_arr,
285                        ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
286                 break;
287         case ETH_SS_TEST:
288                 memcpy(buf, qede_tests_str_arr,
289                        ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
290                 break;
291         default:
292                 DP_VERBOSE(edev, QED_MSG_DEBUG,
293                            "Unsupported stringset 0x%08x\n", stringset);
294         }
295 }
296
297 static void qede_get_ethtool_stats_txq(struct qede_tx_queue *txq, u64 **buf)
298 {
299         int i;
300
301         for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
302                 **buf = *((u64 *)(((void *)txq) + qede_tqstats_arr[i].offset));
303                 (*buf)++;
304         }
305 }
306
307 static void qede_get_ethtool_stats_rxq(struct qede_rx_queue *rxq, u64 **buf)
308 {
309         int i;
310
311         for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
312                 **buf = *((u64 *)(((void *)rxq) + qede_rqstats_arr[i].offset));
313                 (*buf)++;
314         }
315 }
316
317 static void qede_get_ethtool_stats(struct net_device *dev,
318                                    struct ethtool_stats *stats, u64 *buf)
319 {
320         struct qede_dev *edev = netdev_priv(dev);
321         struct qede_fastpath *fp;
322         int i;
323
324         qede_fill_by_demand_stats(edev);
325
326         /* Need to protect the access to the fastpath array */
327         __qede_lock(edev);
328
329         for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
330                 fp = &edev->fp_array[i];
331
332                 if (fp->type & QEDE_FASTPATH_RX)
333                         qede_get_ethtool_stats_rxq(fp->rxq, &buf);
334
335                 if (fp->type & QEDE_FASTPATH_XDP)
336                         qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);
337
338                 if (fp->type & QEDE_FASTPATH_TX)
339                         qede_get_ethtool_stats_txq(fp->txq, &buf);
340         }
341
342         for (i = 0; i < QEDE_NUM_STATS; i++) {
343                 if (qede_is_irrelevant_stat(edev, i))
344                         continue;
345                 *buf = *((u64 *)(((void *)&edev->stats) +
346                                  qede_stats_arr[i].offset));
347
348                 buf++;
349         }
350
351         __qede_unlock(edev);
352 }
353
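The callbacks above are what userspace reaches through the ETHTOOL_GSTRINGS and ETHTOOL_GSTATS ioctls. Below is a rough userspace sketch of that round trip, assuming an interface named "eth0" and with error handling trimmed for brevity; ETHTOOL_GDRVINFO is used only to learn n_stats, which the ethtool core fills from get_sset_count().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	const char *ifname = "eth0";           /* assumed interface name */
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_gstrings *strings;
	struct ethtool_stats *stats;
	struct ifreq ifr = { 0 };
	unsigned int i, n;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* How many counters does the device report? */
	ifr.ifr_data = (void *)&drvinfo;
	ioctl(fd, SIOCETHTOOL, &ifr);
	n = drvinfo.n_stats;

	/* Fetch the names -> qede_get_strings() */
	strings = calloc(1, sizeof(*strings) + n * ETH_GSTRING_LEN);
	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = n;
	ifr.ifr_data = (void *)strings;
	ioctl(fd, SIOCETHTOOL, &ifr);

	/* Fetch the values -> qede_get_ethtool_stats() */
	stats = calloc(1, sizeof(*stats) + n * sizeof(__u64));
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n;
	ifr.ifr_data = (void *)stats;
	ioctl(fd, SIOCETHTOOL, &ifr);

	for (i = 0; i < n; i++)
		printf("%-32.32s %llu\n",
		       (const char *)(strings->data + i * ETH_GSTRING_LEN),
		       (unsigned long long)stats->data[i]);

	free(strings);
	free(stats);
	close(fd);
	return 0;
}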
354 static int qede_get_sset_count(struct net_device *dev, int stringset)
355 {
356         struct qede_dev *edev = netdev_priv(dev);
357         int num_stats = QEDE_NUM_STATS, i;
358
359         switch (stringset) {
360         case ETH_SS_STATS:
361                 for (i = 0; i < QEDE_NUM_STATS; i++)
362                         if (qede_is_irrelevant_stat(edev, i))
363                                 num_stats--;
364
365                 /* Account for the Regular Tx statistics */
366                 num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
367
368                 /* Account for the Regular Rx statistics */
369                 num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS;
370
371                 /* Account for XDP statistics [if needed] */
372                 if (edev->xdp_prog)
373                         num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_TQSTATS;
374                 return num_stats;
375
376         case ETH_SS_PRIV_FLAGS:
377                 return QEDE_PRI_FLAG_LEN;
378         case ETH_SS_TEST:
379                 if (!IS_VF(edev))
380                         return QEDE_ETHTOOL_TEST_MAX;
381                 else
382                         return 0;
383         default:
384                 DP_VERBOSE(edev, QED_MSG_DEBUG,
385                            "Unsupported stringset 0x%08x\n", stringset);
386                 return -EINVAL;
387         }
388 }
389
390 static u32 qede_get_priv_flags(struct net_device *dev)
391 {
392         struct qede_dev *edev = netdev_priv(dev);
393
394         return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT;
395 }
396
397 struct qede_link_mode_mapping {
398         u32 qed_link_mode;
399         u32 ethtool_link_mode;
400 };
401
402 static const struct qede_link_mode_mapping qed_lm_map[] = {
403         {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT},
404         {QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT},
405         {QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT},
406         {QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT},
407         {QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT},
408         {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
409         {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
410         {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
411         {QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
412         {QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
413         {QED_LM_100000baseKR4_Full_BIT,
414          ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
415 };
416
417 #define QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name)      \
418 {                                                               \
419         int i;                                                  \
420                                                                 \
421         for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) {          \
422                 if ((caps) & (qed_lm_map[i].qed_link_mode))     \
423                         __set_bit(qed_lm_map[i].ethtool_link_mode,\
424                                   lk_ksettings->link_modes.name); \
425         }                                                       \
426 }
427
428 #define QEDE_ETHTOOL_TO_DRV_CAPS(caps, lk_ksettings, name)      \
429 {                                                               \
430         int i;                                                  \
431                                                                 \
432         for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) {          \
433                 if (test_bit(qed_lm_map[i].ethtool_link_mode,   \
434                              lk_ksettings->link_modes.name))    \
435                         caps |= qed_lm_map[i].qed_link_mode;    \
436         }                                                       \
437 }
438
439 static int qede_get_link_ksettings(struct net_device *dev,
440                                    struct ethtool_link_ksettings *cmd)
441 {
442         struct ethtool_link_settings *base = &cmd->base;
443         struct qede_dev *edev = netdev_priv(dev);
444         struct qed_link_output current_link;
445
446         __qede_lock(edev);
447
448         memset(&current_link, 0, sizeof(current_link));
449         edev->ops->common->get_link(edev->cdev, &current_link);
450
451         ethtool_link_ksettings_zero_link_mode(cmd, supported);
452         QEDE_DRV_TO_ETHTOOL_CAPS(current_link.supported_caps, cmd, supported)
453
454         ethtool_link_ksettings_zero_link_mode(cmd, advertising);
455         QEDE_DRV_TO_ETHTOOL_CAPS(current_link.advertised_caps, cmd, advertising)
456
457         ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
458         QEDE_DRV_TO_ETHTOOL_CAPS(current_link.lp_caps, cmd, lp_advertising)
459
460         if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
461                 base->speed = current_link.speed;
462                 base->duplex = current_link.duplex;
463         } else {
464                 base->speed = SPEED_UNKNOWN;
465                 base->duplex = DUPLEX_UNKNOWN;
466         }
467
468         __qede_unlock(edev);
469
470         base->port = current_link.port;
471         base->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
472                         AUTONEG_DISABLE;
473
474         return 0;
475 }
476
477 static int qede_set_link_ksettings(struct net_device *dev,
478                                    const struct ethtool_link_ksettings *cmd)
479 {
480         const struct ethtool_link_settings *base = &cmd->base;
481         struct qede_dev *edev = netdev_priv(dev);
482         struct qed_link_output current_link;
483         struct qed_link_params params;
484
485         if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
486                 DP_INFO(edev, "Link settings are not allowed to be changed\n");
487                 return -EOPNOTSUPP;
488         }
489         memset(&current_link, 0, sizeof(current_link));
490         memset(&params, 0, sizeof(params));
491         edev->ops->common->get_link(edev->cdev, &current_link);
492
493         params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
494         params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
495         if (base->autoneg == AUTONEG_ENABLE) {
496                 if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) {
497                         DP_INFO(edev, "Auto negotiation is not supported\n");
498                         return -EOPNOTSUPP;
499                 }
500
501                 params.autoneg = true;
502                 params.forced_speed = 0;
503                 QEDE_ETHTOOL_TO_DRV_CAPS(params.adv_speeds, cmd, advertising)
504         } else {                /* forced speed */
505                 params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
506                 params.autoneg = false;
507                 params.forced_speed = base->speed;
508                 switch (base->speed) {
509                 case SPEED_1000:
510                         if (!(current_link.supported_caps &
511                               QED_LM_1000baseT_Full_BIT)) {
512                                 DP_INFO(edev, "1G speed not supported\n");
513                                 return -EINVAL;
514                         }
515                         params.adv_speeds = QED_LM_1000baseT_Full_BIT;
516                         break;
517                 case SPEED_10000:
518                         if (!(current_link.supported_caps &
519                               QED_LM_10000baseKR_Full_BIT)) {
520                                 DP_INFO(edev, "10G speed not supported\n");
521                                 return -EINVAL;
522                         }
523                         params.adv_speeds = QED_LM_10000baseKR_Full_BIT;
524                         break;
525                 case SPEED_25000:
526                         if (!(current_link.supported_caps &
527                               QED_LM_25000baseKR_Full_BIT)) {
528                                 DP_INFO(edev, "25G speed not supported\n");
529                                 return -EINVAL;
530                         }
531                         params.adv_speeds = QED_LM_25000baseKR_Full_BIT;
532                         break;
533                 case SPEED_40000:
534                         if (!(current_link.supported_caps &
535                               QED_LM_40000baseLR4_Full_BIT)) {
536                                 DP_INFO(edev, "40G speed not supported\n");
537                                 return -EINVAL;
538                         }
539                         params.adv_speeds = QED_LM_40000baseLR4_Full_BIT;
540                         break;
541                 case SPEED_50000:
542                         if (!(current_link.supported_caps &
543                               QED_LM_50000baseKR2_Full_BIT)) {
544                                 DP_INFO(edev, "50G speed not supported\n");
545                                 return -EINVAL;
546                         }
547                         params.adv_speeds = QED_LM_50000baseKR2_Full_BIT;
548                         break;
549                 case SPEED_100000:
550                         if (!(current_link.supported_caps &
551                               QED_LM_100000baseKR4_Full_BIT)) {
552                                 DP_INFO(edev, "100G speed not supported\n");
553                                 return -EINVAL;
554                         }
555                         params.adv_speeds = QED_LM_100000baseKR4_Full_BIT;
556                         break;
557                 default:
558                         DP_INFO(edev, "Unsupported speed %u\n", base->speed);
559                         return -EINVAL;
560                 }
561         }
562
563         params.link_up = true;
564         edev->ops->common->set_link(edev->cdev, &params);
565
566         return 0;
567 }
568
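For illustration, the sketch below exercises the get/set link-settings callbacks from userspace through the legacy ETHTOOL_GSET/ETHTOOL_SSET ioctls, which the ethtool core translates to the ksettings operations above; modern ethtool uses the ETHTOOL_[GS]LINKSETTINGS handshake instead. The "eth0" name, the forced 10G speed and the trimmed error handling are assumptions for the example only.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	const char *ifname = "eth0";              /* assumed interface */
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ecmd;

	/* Read the current settings -> qede_get_link_ksettings() */
	ioctl(fd, SIOCETHTOOL, &ifr);
	printf("speed %u, autoneg %u\n", ethtool_cmd_speed(&ecmd), ecmd.autoneg);

	/* Force 10G full duplex -> the SPEED_10000 branch above */
	ecmd.cmd = ETHTOOL_SSET;
	ecmd.autoneg = AUTONEG_DISABLE;
	ecmd.duplex = DUPLEX_FULL;
	ethtool_cmd_speed_set(&ecmd, SPEED_10000);
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)     /* -> qede_set_link_ksettings() */
		perror("ETHTOOL_SSET");

	close(fd);
	return 0;
}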
569 static void qede_get_drvinfo(struct net_device *ndev,
570                              struct ethtool_drvinfo *info)
571 {
572         char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
573         struct qede_dev *edev = netdev_priv(ndev);
574
575         strlcpy(info->driver, "qede", sizeof(info->driver));
576         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
577
578         snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
579                  edev->dev_info.common.fw_major,
580                  edev->dev_info.common.fw_minor,
581                  edev->dev_info.common.fw_rev,
582                  edev->dev_info.common.fw_eng);
583
584         snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
585                  (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
586                  (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
587                  (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
588                  edev->dev_info.common.mfw_rev & 0xFF);
589
590         if ((strlen(storm) + strlen(mfw) + strlen("mfw storm  ")) <
591             sizeof(info->fw_version)) {
592                 snprintf(info->fw_version, sizeof(info->fw_version),
593                          "mfw %s storm %s", mfw, storm);
594         } else {
595                 snprintf(info->fw_version, sizeof(info->fw_version),
596                          "%s %s", mfw, storm);
597         }
598
599         strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
600 }
601
602 static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
603 {
604         struct qede_dev *edev = netdev_priv(ndev);
605
606         if (edev->dev_info.common.wol_support) {
607                 wol->supported = WAKE_MAGIC;
608                 wol->wolopts = edev->wol_enabled ? WAKE_MAGIC : 0;
609         }
610 }
611
612 static int qede_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
613 {
614         struct qede_dev *edev = netdev_priv(ndev);
615         bool wol_requested;
616         int rc;
617
618         if (wol->wolopts & ~WAKE_MAGIC) {
619                 DP_INFO(edev,
620                         "Can't support WoL options other than magic-packet\n");
621                 return -EINVAL;
622         }
623
624         wol_requested = !!(wol->wolopts & WAKE_MAGIC);
625         if (wol_requested == edev->wol_enabled)
626                 return 0;
627
628         /* Need to actually change configuration */
629         if (!edev->dev_info.common.wol_support) {
630                 DP_INFO(edev, "Device doesn't support WoL\n");
631                 return -EINVAL;
632         }
633
634         rc = edev->ops->common->update_wol(edev->cdev, wol_requested);
635         if (!rc)
636                 edev->wol_enabled = wol_requested;
637
638         return rc;
639 }
640
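A minimal userspace sketch of the Wake-on-LAN path, assuming "eth0" and omitting error handling: ETHTOOL_GWOL lands in qede_get_wol() and ETHTOOL_SWOL in qede_set_wol(), which only accepts the magic-packet option.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	const char *ifname = "eth0";                  /* assumed interface */
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&wol;

	/* Read current WoL state -> qede_get_wol() */
	ioctl(fd, SIOCETHTOOL, &ifr);
	printf("supported 0x%x, enabled 0x%x\n", wol.supported, wol.wolopts);

	/* Request magic-packet wake-up -> qede_set_wol() */
	wol.cmd = ETHTOOL_SWOL;
	wol.wolopts = WAKE_MAGIC;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SWOL");

	close(fd);
	return 0;
}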
641 static u32 qede_get_msglevel(struct net_device *ndev)
642 {
643         struct qede_dev *edev = netdev_priv(ndev);
644
645         return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module;
646 }
647
648 static void qede_set_msglevel(struct net_device *ndev, u32 level)
649 {
650         struct qede_dev *edev = netdev_priv(ndev);
651         u32 dp_module = 0;
652         u8 dp_level = 0;
653
654         qede_config_debug(level, &dp_module, &dp_level);
655
656         edev->dp_level = dp_level;
657         edev->dp_module = dp_module;
658         edev->ops->common->update_msglvl(edev->cdev,
659                                          dp_module, dp_level);
660 }
661
662 static int qede_nway_reset(struct net_device *dev)
663 {
664         struct qede_dev *edev = netdev_priv(dev);
665         struct qed_link_output current_link;
666         struct qed_link_params link_params;
667
668         if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
669                 DP_INFO(edev, "Link settings are not allowed to be changed\n");
670                 return -EOPNOTSUPP;
671         }
672
673         if (!netif_running(dev))
674                 return 0;
675
676         memset(&current_link, 0, sizeof(current_link));
677         edev->ops->common->get_link(edev->cdev, &current_link);
678         if (!current_link.link_up)
679                 return 0;
680
681         /* Toggle the link */
682         memset(&link_params, 0, sizeof(link_params));
683         link_params.link_up = false;
684         edev->ops->common->set_link(edev->cdev, &link_params);
685         link_params.link_up = true;
686         edev->ops->common->set_link(edev->cdev, &link_params);
687
688         return 0;
689 }
690
691 static u32 qede_get_link(struct net_device *dev)
692 {
693         struct qede_dev *edev = netdev_priv(dev);
694         struct qed_link_output current_link;
695
696         memset(&current_link, 0, sizeof(current_link));
697         edev->ops->common->get_link(edev->cdev, &current_link);
698
699         return current_link.link_up;
700 }
701
702 static int qede_get_coalesce(struct net_device *dev,
703                              struct ethtool_coalesce *coal)
704 {
705         struct qede_dev *edev = netdev_priv(dev);
706         u16 rxc, txc;
707
708         memset(coal, 0, sizeof(struct ethtool_coalesce));
709         edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc);
710
711         coal->rx_coalesce_usecs = rxc;
712         coal->tx_coalesce_usecs = txc;
713
714         return 0;
715 }
716
717 static int qede_set_coalesce(struct net_device *dev,
718                              struct ethtool_coalesce *coal)
719 {
720         struct qede_dev *edev = netdev_priv(dev);
721         int i, rc = 0;
722         u16 rxc, txc, sb_id;
723
724         if (!netif_running(dev)) {
725                 DP_INFO(edev, "Interface is down\n");
726                 return -EINVAL;
727         }
728
729         if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
730             coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
731                 DP_INFO(edev,
732                         "Can't support requested %s coalesce value [max supported value %d]\n",
733                         coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
734                                                                    : "tx",
735                         QED_COALESCE_MAX);
736                 return -EINVAL;
737         }
738
739         rxc = (u16)coal->rx_coalesce_usecs;
740         txc = (u16)coal->tx_coalesce_usecs;
741         for_each_queue(i) {
742                 sb_id = edev->fp_array[i].sb_info->igu_sb_id;
743                 rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
744                                                      (u16)i, sb_id);
745                 if (rc) {
746                         DP_INFO(edev, "Set coalesce error, rc = %d\n", rc);
747                         return rc;
748                 }
749         }
750
751         return rc;
752 }
753
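The coalescing callbacks map to ETHTOOL_GCOALESCE/ETHTOOL_SCOALESCE. A minimal sketch, assuming "eth0", arbitrary example timer values and no error handling:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	const char *ifname = "eth0";                 /* assumed interface */
	struct ethtool_coalesce coal = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&coal;

	ioctl(fd, SIOCETHTOOL, &ifr);                /* -> qede_get_coalesce() */
	printf("rx %u us, tx %u us\n",
	       coal.rx_coalesce_usecs, coal.tx_coalesce_usecs);

	/* Raise both timers; the driver rejects values above QED_COALESCE_MAX. */
	coal.cmd = ETHTOOL_SCOALESCE;
	coal.rx_coalesce_usecs = 64;
	coal.tx_coalesce_usecs = 128;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)        /* -> qede_set_coalesce() */
		perror("ETHTOOL_SCOALESCE");

	close(fd);
	return 0;
}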
754 static void qede_get_ringparam(struct net_device *dev,
755                                struct ethtool_ringparam *ering)
756 {
757         struct qede_dev *edev = netdev_priv(dev);
758
759         ering->rx_max_pending = NUM_RX_BDS_MAX;
760         ering->rx_pending = edev->q_num_rx_buffers;
761         ering->tx_max_pending = NUM_TX_BDS_MAX;
762         ering->tx_pending = edev->q_num_tx_buffers;
763 }
764
765 static int qede_set_ringparam(struct net_device *dev,
766                               struct ethtool_ringparam *ering)
767 {
768         struct qede_dev *edev = netdev_priv(dev);
769
770         DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
771                    "Set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
772                    ering->rx_pending, ering->tx_pending);
773
774         /* Validate legality of configuration */
775         if (ering->rx_pending > NUM_RX_BDS_MAX ||
776             ering->rx_pending < NUM_RX_BDS_MIN ||
777             ering->tx_pending > NUM_TX_BDS_MAX ||
778             ering->tx_pending < NUM_TX_BDS_MIN) {
779                 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
780                            "Can only support Rx Buffer size [0x%08x,...,0x%08x] and Tx Buffer size [0x%08x,...,0x%08x]\n",
781                            NUM_RX_BDS_MIN, NUM_RX_BDS_MAX,
782                            NUM_TX_BDS_MIN, NUM_TX_BDS_MAX);
783                 return -EINVAL;
784         }
785
786         /* Change ring size and re-load */
787         edev->q_num_rx_buffers = ering->rx_pending;
788         edev->q_num_tx_buffers = ering->tx_pending;
789
790         qede_reload(edev, NULL, false);
791
792         return 0;
793 }
794
795 static void qede_get_pauseparam(struct net_device *dev,
796                                 struct ethtool_pauseparam *epause)
797 {
798         struct qede_dev *edev = netdev_priv(dev);
799         struct qed_link_output current_link;
800
801         memset(&current_link, 0, sizeof(current_link));
802         edev->ops->common->get_link(edev->cdev, &current_link);
803
804         if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
805                 epause->autoneg = true;
806         if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
807                 epause->rx_pause = true;
808         if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
809                 epause->tx_pause = true;
810
811         DP_VERBOSE(edev, QED_MSG_DEBUG,
812                    "ethtool_pauseparam: cmd %d  autoneg %d  rx_pause %d  tx_pause %d\n",
813                    epause->cmd, epause->autoneg, epause->rx_pause,
814                    epause->tx_pause);
815 }
816
817 static int qede_set_pauseparam(struct net_device *dev,
818                                struct ethtool_pauseparam *epause)
819 {
820         struct qede_dev *edev = netdev_priv(dev);
821         struct qed_link_params params;
822         struct qed_link_output current_link;
823
824         if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
825                 DP_INFO(edev,
826                         "Pause settings are not allowed to be changed\n");
827                 return -EOPNOTSUPP;
828         }
829
830         memset(&current_link, 0, sizeof(current_link));
831         edev->ops->common->get_link(edev->cdev, &current_link);
832
833         memset(&params, 0, sizeof(params));
834         params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
835         if (epause->autoneg) {
836                 if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) {
837                         DP_INFO(edev, "autoneg not supported\n");
838                         return -EINVAL;
839                 }
840                 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
841         }
842         if (epause->rx_pause)
843                 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
844         if (epause->tx_pause)
845                 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
846
847         params.link_up = true;
848         edev->ops->common->set_link(edev->cdev, &params);
849
850         return 0;
851 }
852
853 static void qede_get_regs(struct net_device *ndev,
854                           struct ethtool_regs *regs, void *buffer)
855 {
856         struct qede_dev *edev = netdev_priv(ndev);
857
858         regs->version = 0;
859         memset(buffer, 0, regs->len);
860
861         if (edev->ops && edev->ops->common)
862                 edev->ops->common->dbg_all_data(edev->cdev, buffer);
863 }
864
865 static int qede_get_regs_len(struct net_device *ndev)
866 {
867         struct qede_dev *edev = netdev_priv(ndev);
868
869         if (edev->ops && edev->ops->common)
870                 return edev->ops->common->dbg_all_data_size(edev->cdev);
871         else
872                 return -EINVAL;
873 }
874
875 static void qede_update_mtu(struct qede_dev *edev,
876                             struct qede_reload_args *args)
877 {
878         edev->ndev->mtu = args->u.mtu;
879 }
880
881 /* Netdevice NDOs */
882 int qede_change_mtu(struct net_device *ndev, int new_mtu)
883 {
884         struct qede_dev *edev = netdev_priv(ndev);
885         struct qede_reload_args args;
886
887         DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
888                    "Configuring MTU size of %d\n", new_mtu);
889
890         /* Set the mtu field and re-start the interface if needed */
891         args.u.mtu = new_mtu;
892         args.func = &qede_update_mtu;
893         qede_reload(edev, &args, false);
894
895         edev->ops->common->update_mtu(edev->cdev, new_mtu);
896
897         return 0;
898 }
899
900 static void qede_get_channels(struct net_device *dev,
901                               struct ethtool_channels *channels)
902 {
903         struct qede_dev *edev = netdev_priv(dev);
904
905         channels->max_combined = QEDE_MAX_RSS_CNT(edev);
906         channels->max_rx = QEDE_MAX_RSS_CNT(edev);
907         channels->max_tx = QEDE_MAX_RSS_CNT(edev);
908         channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
909                                         edev->fp_num_rx;
910         channels->tx_count = edev->fp_num_tx;
911         channels->rx_count = edev->fp_num_rx;
912 }
913
914 static int qede_set_channels(struct net_device *dev,
915                              struct ethtool_channels *channels)
916 {
917         struct qede_dev *edev = netdev_priv(dev);
918         u32 count;
919
920         DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
921                    "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
922                    channels->rx_count, channels->tx_count,
923                    channels->other_count, channels->combined_count);
924
925         count = channels->rx_count + channels->tx_count +
926                         channels->combined_count;
927
928         /* We don't support `other' channels */
929         if (channels->other_count) {
930                 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
931                            "command parameters not supported\n");
932                 return -EINVAL;
933         }
934
935         if (!(channels->combined_count || (channels->rx_count &&
936                                            channels->tx_count))) {
937                 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
938                            "need to request at least one transmit and one receive channel\n");
939                 return -EINVAL;
940         }
941
942         if (count > QEDE_MAX_RSS_CNT(edev)) {
943                 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
944                            "requested channels = %d max supported channels = %d\n",
945                            count, QEDE_MAX_RSS_CNT(edev));
946                 return -EINVAL;
947         }
948
949         /* Check if there was a change in the active parameters */
950         if ((count == QEDE_QUEUE_CNT(edev)) &&
951             (channels->tx_count == edev->fp_num_tx) &&
952             (channels->rx_count == edev->fp_num_rx)) {
953                 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
954                            "No change in active parameters\n");
955                 return 0;
956         }
957
958         /* The number of queues must be divisible by the number of hwfns */
959         if ((count % edev->dev_info.common.num_hwfns) ||
960             (channels->tx_count % edev->dev_info.common.num_hwfns) ||
961             (channels->rx_count % edev->dev_info.common.num_hwfns)) {
962                 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
963                            "Number of channels must be divisible by %04x\n",
964                            edev->dev_info.common.num_hwfns);
965                 return -EINVAL;
966         }
967
968         /* Set number of queues and reload if necessary */
969         edev->req_queues = count;
970         edev->req_num_tx = channels->tx_count;
971         edev->req_num_rx = channels->rx_count;
972         /* Reset the indirection table if rx queue count is updated */
973         if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
974                 edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
975                 memset(edev->rss_ind_table, 0, sizeof(edev->rss_ind_table));
976         }
977
978         qede_reload(edev, NULL, false);
979
980         return 0;
981 }
982
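The channel callbacks are reached through ETHTOOL_GCHANNELS/ETHTOOL_SCHANNELS. A minimal sketch, assuming "eth0", a hypothetical request for four combined queues, and no error handling; the driver itself enforces the "no 'other' channels" and divisible-by-hwfns rules above.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	const char *ifname = "eth0";                  /* assumed interface */
	struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ch;

	ioctl(fd, SIOCETHTOOL, &ifr);                 /* -> qede_get_channels() */
	printf("combined %u/%u, rx %u, tx %u\n",
	       ch.combined_count, ch.max_combined, ch.rx_count, ch.tx_count);

	/* Ask for 4 combined queues; 'other' channels are not supported. */
	ch.cmd = ETHTOOL_SCHANNELS;
	ch.combined_count = 4;
	ch.rx_count = 0;
	ch.tx_count = 0;
	ch.other_count = 0;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)         /* -> qede_set_channels() */
		perror("ETHTOOL_SCHANNELS");

	close(fd);
	return 0;
}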
983 static int qede_get_ts_info(struct net_device *dev,
984                             struct ethtool_ts_info *info)
985 {
986         struct qede_dev *edev = netdev_priv(dev);
987
988         return qede_ptp_get_ts_info(edev, info);
989 }
990
991 static int qede_set_phys_id(struct net_device *dev,
992                             enum ethtool_phys_id_state state)
993 {
994         struct qede_dev *edev = netdev_priv(dev);
995         u8 led_state = 0;
996
997         switch (state) {
998         case ETHTOOL_ID_ACTIVE:
999                 return 1;       /* cycle on/off once per second */
1000
1001         case ETHTOOL_ID_ON:
1002                 led_state = QED_LED_MODE_ON;
1003                 break;
1004
1005         case ETHTOOL_ID_OFF:
1006                 led_state = QED_LED_MODE_OFF;
1007                 break;
1008
1009         case ETHTOOL_ID_INACTIVE:
1010                 led_state = QED_LED_MODE_RESTORE;
1011                 break;
1012         }
1013
1014         edev->ops->common->set_led(edev->cdev, led_state);
1015
1016         return 0;
1017 }
1018
1019 static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
1020 {
1021         info->data = RXH_IP_SRC | RXH_IP_DST;
1022
1023         switch (info->flow_type) {
1024         case TCP_V4_FLOW:
1025         case TCP_V6_FLOW:
1026                 info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1027                 break;
1028         case UDP_V4_FLOW:
1029                 if (edev->rss_caps & QED_RSS_IPV4_UDP)
1030                         info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1031                 break;
1032         case UDP_V6_FLOW:
1033                 if (edev->rss_caps & QED_RSS_IPV6_UDP)
1034                         info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1035                 break;
1036         case IPV4_FLOW:
1037         case IPV6_FLOW:
1038                 break;
1039         default:
1040                 info->data = 0;
1041                 break;
1042         }
1043
1044         return 0;
1045 }
1046
1047 static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1048                           u32 *rule_locs)
1049 {
1050         struct qede_dev *edev = netdev_priv(dev);
1051         int rc = 0;
1052
1053         switch (info->cmd) {
1054         case ETHTOOL_GRXRINGS:
1055                 info->data = QEDE_RSS_COUNT(edev);
1056                 break;
1057         case ETHTOOL_GRXFH:
1058                 rc = qede_get_rss_flags(edev, info);
1059                 break;
1060         case ETHTOOL_GRXCLSRLCNT:
1061                 info->rule_cnt = qede_get_arfs_filter_count(edev);
1062                 info->data = QEDE_RFS_MAX_FLTR;
1063                 break;
1064         case ETHTOOL_GRXCLSRULE:
1065                 rc = qede_get_cls_rule_entry(edev, info);
1066                 break;
1067         case ETHTOOL_GRXCLSRLALL:
1068                 rc = qede_get_cls_rule_all(edev, info, rule_locs);
1069                 break;
1070         default:
1071                 DP_ERR(edev, "Command parameters not supported\n");
1072                 rc = -EOPNOTSUPP;
1073         }
1074
1075         return rc;
1076 }
1077
1078 static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
1079 {
1080         struct qed_update_vport_params *vport_update_params;
1081         u8 set_caps = 0, clr_caps = 0;
1082         int rc = 0;
1083
1084         DP_VERBOSE(edev, QED_MSG_DEBUG,
1085                    "Set rss flags command parameters: flow type = %d, data = %llu\n",
1086                    info->flow_type, info->data);
1087
1088         switch (info->flow_type) {
1089         case TCP_V4_FLOW:
1090         case TCP_V6_FLOW:
1091                 /* For TCP only 4-tuple hash is supported */
1092                 if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
1093                                   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1094                         DP_INFO(edev, "Command parameters not supported\n");
1095                         return -EINVAL;
1096                 }
1097                 return 0;
1098         case UDP_V4_FLOW:
1099                 /* For UDP either 2-tuple hash or 4-tuple hash is supported */
1100                 if (info->data == (RXH_IP_SRC | RXH_IP_DST |
1101                                    RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1102                         set_caps = QED_RSS_IPV4_UDP;
1103                         DP_VERBOSE(edev, QED_MSG_DEBUG,
1104                                    "UDP 4-tuple enabled\n");
1105                 } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
1106                         clr_caps = QED_RSS_IPV4_UDP;
1107                         DP_VERBOSE(edev, QED_MSG_DEBUG,
1108                                    "UDP 4-tuple disabled\n");
1109                 } else {
1110                         return -EINVAL;
1111                 }
1112                 break;
1113         case UDP_V6_FLOW:
1114                 /* For UDP either 2-tuple hash or 4-tuple hash is supported */
1115                 if (info->data == (RXH_IP_SRC | RXH_IP_DST |
1116                                    RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1117                         set_caps = QED_RSS_IPV6_UDP;
1118                         DP_VERBOSE(edev, QED_MSG_DEBUG,
1119                                    "UDP 4-tuple enabled\n");
1120                 } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
1121                         clr_caps = QED_RSS_IPV6_UDP;
1122                         DP_VERBOSE(edev, QED_MSG_DEBUG,
1123                                    "UDP 4-tuple disabled\n");
1124                 } else {
1125                         return -EINVAL;
1126                 }
1127                 break;
1128         case IPV4_FLOW:
1129         case IPV6_FLOW:
1130                 /* For IP only 2-tuple hash is supported */
1131                 if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
1132                         DP_INFO(edev, "Command parameters not supported\n");
1133                         return -EINVAL;
1134                 }
1135                 return 0;
1136         case SCTP_V4_FLOW:
1137         case AH_ESP_V4_FLOW:
1138         case AH_V4_FLOW:
1139         case ESP_V4_FLOW:
1140         case SCTP_V6_FLOW:
1141         case AH_ESP_V6_FLOW:
1142         case AH_V6_FLOW:
1143         case ESP_V6_FLOW:
1144         case IP_USER_FLOW:
1145         case ETHER_FLOW:
1146                 /* RSS is not supported for these protocols */
1147                 if (info->data) {
1148                         DP_INFO(edev, "Command parameters not supported\n");
1149                         return -EINVAL;
1150                 }
1151                 return 0;
1152         default:
1153                 return -EINVAL;
1154         }
1155
1156         /* No action is needed if there is no change in the rss capability */
1157         if (edev->rss_caps == ((edev->rss_caps & ~clr_caps) | set_caps))
1158                 return 0;
1159
1160         /* Update internal configuration */
1161         edev->rss_caps = ((edev->rss_caps & ~clr_caps) | set_caps);
1162         edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
1163
1164         /* Re-configure if possible */
1165         __qede_lock(edev);
1166         if (edev->state == QEDE_STATE_OPEN) {
1167                 vport_update_params = vzalloc(sizeof(*vport_update_params));
1168                 if (!vport_update_params) {
1169                         __qede_unlock(edev);
1170                         return -ENOMEM;
1171                 }
1172                 qede_fill_rss_params(edev, &vport_update_params->rss_params,
1173                                      &vport_update_params->update_rss_flg);
1174                 rc = edev->ops->vport_update(edev->cdev, vport_update_params);
1175                 vfree(vport_update_params);
1176         }
1177         __qede_unlock(edev);
1178
1179         return rc;
1180 }
1181
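qede_set_rss_flags() is driven by the ETHTOOL_SRXFH command inside struct ethtool_rxnfc. The sketch below enables 4-tuple hashing for UDP/IPv4, the equivalent of "ethtool -N eth0 rx-flow-hash udp4 sdfn"; the "eth0" name and the omitted error handling are assumptions of the example.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	const char *ifname = "eth0";                  /* assumed interface */
	struct ethtool_rxnfc nfc = { 0 };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	/* Hash UDP/IPv4 on src/dst IP and src/dst port -> qede_set_rss_flags() */
	nfc.cmd = ETHTOOL_SRXFH;
	nfc.flow_type = UDP_V4_FLOW;
	nfc.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXFH");

	close(fd);
	return 0;
}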
1182 static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
1183 {
1184         struct qede_dev *edev = netdev_priv(dev);
1185         int rc;
1186
1187         switch (info->cmd) {
1188         case ETHTOOL_SRXFH:
1189                 rc = qede_set_rss_flags(edev, info);
1190                 break;
1191         case ETHTOOL_SRXCLSRLINS:
1192                 rc = qede_add_cls_rule(edev, info);
1193                 break;
1194         case ETHTOOL_SRXCLSRLDEL:
1195                 rc = qede_del_cls_rule(edev, info);
1196                 break;
1197         default:
1198                 DP_INFO(edev, "Command parameters not supported\n");
1199                 rc = -EOPNOTSUPP;
1200         }
1201
1202         return rc;
1203 }
1204
1205 static u32 qede_get_rxfh_indir_size(struct net_device *dev)
1206 {
1207         return QED_RSS_IND_TABLE_SIZE;
1208 }
1209
1210 static u32 qede_get_rxfh_key_size(struct net_device *dev)
1211 {
1212         struct qede_dev *edev = netdev_priv(dev);
1213
1214         return sizeof(edev->rss_key);
1215 }
1216
1217 static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
1218 {
1219         struct qede_dev *edev = netdev_priv(dev);
1220         int i;
1221
1222         if (hfunc)
1223                 *hfunc = ETH_RSS_HASH_TOP;
1224
1225         if (!indir)
1226                 return 0;
1227
1228         for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
1229                 indir[i] = edev->rss_ind_table[i];
1230
1231         if (key)
1232                 memcpy(key, edev->rss_key, qede_get_rxfh_key_size(dev));
1233
1234         return 0;
1235 }
1236
1237 static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
1238                          const u8 *key, const u8 hfunc)
1239 {
1240         struct qed_update_vport_params *vport_update_params;
1241         struct qede_dev *edev = netdev_priv(dev);
1242         int i, rc = 0;
1243
1244         if (edev->dev_info.common.num_hwfns > 1) {
1245                 DP_INFO(edev,
1246                         "RSS configuration is not supported for 100G devices\n");
1247                 return -EOPNOTSUPP;
1248         }
1249
1250         if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
1251                 return -EOPNOTSUPP;
1252
1253         if (!indir && !key)
1254                 return 0;
1255
1256         if (indir) {
1257                 for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
1258                         edev->rss_ind_table[i] = indir[i];
1259                 edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
1260         }
1261
1262         if (key) {
1263                 memcpy(&edev->rss_key, key, qede_get_rxfh_key_size(dev));
1264                 edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
1265         }
1266
1267         __qede_lock(edev);
1268         if (edev->state == QEDE_STATE_OPEN) {
1269                 vport_update_params = vzalloc(sizeof(*vport_update_params));
1270                 if (!vport_update_params) {
1271                         __qede_unlock(edev);
1272                         return -ENOMEM;
1273                 }
1274                 qede_fill_rss_params(edev, &vport_update_params->rss_params,
1275                                      &vport_update_params->update_rss_flg);
1276                 rc = edev->ops->vport_update(edev->cdev, vport_update_params);
1277                 vfree(vport_update_params);
1278         }
1279         __qede_unlock(edev);
1280
1281         return rc;
1282 }
1283
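The RSS indirection table can be reprogrammed with the older ETHTOOL_GRXFHINDIR/ETHTOOL_SRXFHINDIR ioctls, which the ethtool core routes to the rxfh callbacks above (with no key change). A sketch assuming "eth0", a two-queue spread chosen arbitrarily, and no error handling:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	const char *ifname = "eth0";                  /* assumed interface */
	struct ethtool_rxfh_indir *indir;
	struct ifreq ifr = { 0 };
	unsigned int i, size;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* Query the table size first (size = 0 means "just report it"). */
	indir = calloc(1, sizeof(*indir));
	indir->cmd = ETHTOOL_GRXFHINDIR;
	ifr.ifr_data = (void *)indir;
	ioctl(fd, SIOCETHTOOL, &ifr);          /* -> qede_get_rxfh_indir_size() */
	size = indir->size;
	free(indir);

	/* Spread the table across the first two RX queues. */
	indir = calloc(1, sizeof(*indir) + size * sizeof(__u32));
	indir->cmd = ETHTOOL_SRXFHINDIR;
	indir->size = size;
	for (i = 0; i < size; i++)
		indir->ring_index[i] = i % 2;
	ifr.ifr_data = (void *)indir;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)         /* -> qede_set_rxfh() */
		perror("ETHTOOL_SRXFHINDIR");

	free(indir);
	close(fd);
	return 0;
}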
1284 /* This function enables the interrupt generation and the NAPI on the device */
1285 static void qede_netif_start(struct qede_dev *edev)
1286 {
1287         int i;
1288
1289         if (!netif_running(edev->ndev))
1290                 return;
1291
1292         for_each_queue(i) {
1293                 /* Update and reenable interrupts */
1294                 qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
1295                 napi_enable(&edev->fp_array[i].napi);
1296         }
1297 }
1298
1299 /* This function disables the NAPI and the interrupt generation on the device */
1300 static void qede_netif_stop(struct qede_dev *edev)
1301 {
1302         int i;
1303
1304         for_each_queue(i) {
1305                 napi_disable(&edev->fp_array[i].napi);
1306                 /* Disable interrupts */
1307                 qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
1308         }
1309 }
1310
1311 static int qede_selftest_transmit_traffic(struct qede_dev *edev,
1312                                           struct sk_buff *skb)
1313 {
1314         struct qede_tx_queue *txq = NULL;
1315         struct eth_tx_1st_bd *first_bd;
1316         dma_addr_t mapping;
1317         int i, idx;
1318         u16 val;
1319
1320         for_each_queue(i) {
1321                 if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
1322                         txq = edev->fp_array[i].txq;
1323                         break;
1324                 }
1325         }
1326
1327         if (!txq) {
1328                 DP_NOTICE(edev, "Tx path is not available\n");
1329                 return -1;
1330         }
1331
1332         /* Fill the entry in the SW ring and the BDs in the FW ring */
1333         idx = txq->sw_tx_prod;
1334         txq->sw_tx_ring.skbs[idx].skb = skb;
1335         first_bd = qed_chain_produce(&txq->tx_pbl);
1336         memset(first_bd, 0, sizeof(*first_bd));
1337         val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
1338         first_bd->data.bd_flags.bitfields = val;
1339         val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
1340         val = val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
1341         first_bd->data.bitfields |= cpu_to_le16(val);
1342
1343         /* Map skb linear data for DMA and set in the first BD */
1344         mapping = dma_map_single(&edev->pdev->dev, skb->data,
1345                                  skb_headlen(skb), DMA_TO_DEVICE);
1346         if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
1347                 DP_NOTICE(edev, "SKB mapping failed\n");
1348                 return -ENOMEM;
1349         }
1350         BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
1351
1352         /* update the first BD with the actual num BDs */
1353         first_bd->data.nbds = 1;
1354         txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
1355         /* 'next page' entries are counted in the producer value */
1356         val = qed_chain_get_prod_idx(&txq->tx_pbl);
1357         txq->tx_db.data.bd_prod = cpu_to_le16(val);
1358
1359         /* wmb makes sure that the BD data is updated before the producer is
1360          * updated; otherwise the FW may read stale data from the BDs.
1361          */
1362         wmb();
1363         barrier();
1364         writel(txq->tx_db.raw, txq->doorbell_addr);
1365
1366         /* mmiowb is needed to synchronize doorbell writes from more than one
1367          * processor. It guarantees that the write arrives to the device before
1368          * the queue lock is released and another start_xmit is called (possibly
1369          * on another CPU). Without this barrier, the next doorbell can bypass
1370          * this doorbell. This is applicable to IA64/Altix systems.
1371          */
1372         mmiowb();
1373
1374         for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
1375                 if (qede_txq_has_work(txq))
1376                         break;
1377                 usleep_range(100, 200);
1378         }
1379
1380         if (!qede_txq_has_work(txq)) {
1381                 DP_NOTICE(edev, "Tx completion didn't happen\n");
1382                 return -1;
1383         }
1384
1385         first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
1386         dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
1387                          BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
1388         txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
1389         txq->sw_tx_ring.skbs[idx].skb = NULL;
1390
1391         return 0;
1392 }
1393
1394 static int qede_selftest_receive_traffic(struct qede_dev *edev)
1395 {
1396         u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
1397         struct eth_fast_path_rx_reg_cqe *fp_cqe;
1398         struct qede_rx_queue *rxq = NULL;
1399         struct sw_rx_data *sw_rx_data;
1400         union eth_rx_cqe *cqe;
1401         int i, iter, rc = 0;
1402         u8 *data_ptr;
1403
1404         for_each_queue(i) {
1405                 if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
1406                         rxq = edev->fp_array[i].rxq;
1407                         break;
1408                 }
1409         }
1410
1411         if (!rxq) {
1412                 DP_NOTICE(edev, "Rx path is not available\n");
1413                 return -1;
1414         }
1415
1416         /* The packet is expected to be received on rx-queue 0 even though RSS
1417          * is enabled. This is because queue 0 is configured as the default
1418          * queue and the loopback traffic is not IP.
1419          */
1420         for (iter = 0; iter < QEDE_SELFTEST_POLL_COUNT; iter++) {
1421                 if (!qede_has_rx_work(rxq)) {
1422                         usleep_range(100, 200);
1423                         continue;
1424                 }
1425
1426                 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
1427                 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1428
1429                 /* Memory barrier to prevent the CPU from doing speculative
1430                  * reads of the CQE/BD before reading hw_comp_cons. If the
1431                  * CQE were read before FW writes it, and FW then writes the
1432                  * CQE and SB before the CPU reads hw_comp_cons, the CPU
1433                  * would end up using a stale CQE.
1434                  */
1435                 rmb();
1436
1437                 /* Get the CQE from the completion ring */
1438                 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
1439
1440                 /* Get the data from the SW ring */
1441                 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1442                 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
1443                 fp_cqe = &cqe->fast_path_regular;
1444                 len = le16_to_cpu(fp_cqe->len_on_first_bd);
1445                 data_ptr = (u8 *)(page_address(sw_rx_data->data) +
1446                                   fp_cqe->placement_offset +
1447                                   sw_rx_data->page_offset);
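                     /* The self-test frame carries the device's own MAC as
                      * both destination and source address; any other frame
                      * on the ring is recycled and skipped below.
                      */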
1448                 if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
1449                     ether_addr_equal(data_ptr + ETH_ALEN,
1450                                      edev->ndev->dev_addr)) {
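                             /* Verify the incrementing byte pattern written
                              * by qede_selftest_run_loopback().
                              */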
1451                         for (i = ETH_HLEN; i < len; i++)
1452                                 if (data_ptr[i] != (unsigned char)(i & 0xff)) {
1453                                         rc = -1;
1454                                         break;
1455                                 }
1456
1457                         qede_recycle_rx_bd_ring(rxq, 1);
1458                         qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1459                         break;
1460                 }
1461
1462                 DP_INFO(edev, "Not the transmitted packet\n");
1463                 qede_recycle_rx_bd_ring(rxq, 1);
1464                 qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1465         }
1466
1467         if (iter == QEDE_SELFTEST_POLL_COUNT) {
1468                 DP_NOTICE(edev, "Failed to receive the traffic\n");
1469                 return -1;
1470         }
1471
1472         qede_update_rx_prod(edev, rxq);
1473
1474         return rc;
1475 }
1476
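     /* Run a single loopback iteration: stop the data path, force the link
      * into the requested loopback mode, inject one self-addressed frame,
      * poll for it on the Rx side and then restore the normal link mode.
      */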
1477 static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
1478 {
1479         struct qed_link_params link_params;
1480         struct sk_buff *skb = NULL;
1481         int rc = 0, i;
1482         u32 pkt_size;
1483         u8 *packet;
1484
1485         if (!netif_running(edev->ndev)) {
1486                 DP_NOTICE(edev, "Interface is down\n");
1487                 return -EINVAL;
1488         }
1489
1490         qede_netif_stop(edev);
1491
1492         /* Bring up the link in Loopback mode */
1493         memset(&link_params, 0, sizeof(link_params));
1494         link_params.link_up = true;
1495         link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
1496         link_params.loopback_mode = loopback_mode;
1497         edev->ops->common->set_link(edev->cdev, &link_params);
1498
1499         /* Wait for loopback configuration to apply */
1500         msleep_interruptible(500);
1501
1502         /* Prepare the loopback packet */
1503         pkt_size = edev->ndev->mtu + ETH_HLEN;
1504
1505         skb = netdev_alloc_skb(edev->ndev, pkt_size);
1506         if (!skb) {
1507                 DP_INFO(edev, "Can't allocate skb\n");
1508                 rc = -ENOMEM;
1509                 goto test_loopback_exit;
1510         }
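             /* Build a self-addressed Ethernet frame: own MAC as both DA and
              * SA, 0x77 in the remaining header bytes and an incrementing
              * byte pattern as payload, which the receive side verifies.
              */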
1511         packet = skb_put(skb, pkt_size);
1512         ether_addr_copy(packet, edev->ndev->dev_addr);
1513         ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
1514         memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
1515         for (i = ETH_HLEN; i < pkt_size; i++)
1516                 packet[i] = (unsigned char)(i & 0xff);
1517
1518         rc = qede_selftest_transmit_traffic(edev, skb);
1519         if (rc)
1520                 goto test_loopback_exit;
1521
1522         rc = qede_selftest_receive_traffic(edev);
1523         if (rc)
1524                 goto test_loopback_exit;
1525
1526         DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");
1527
1528 test_loopback_exit:
1529         dev_kfree_skb(skb);
1530
1531         /* Bring up the link in Normal mode */
1532         memset(&link_params, 0, sizeof(link_params));
1533         link_params.link_up = true;
1534         link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
1535         link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
1536         edev->ops->common->set_link(edev->cdev, &link_params);
1537
1538         /* Wait for loopback configuration to apply */
1539         msleep_interruptible(500);
1540
1541         qede_netif_start(edev);
1542
1543         return rc;
1544 }
1545
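     /* ethtool self-test entry point. From userspace this is typically
      * triggered via e.g. "ethtool -t <ifname> offline" (which additionally
      * runs the internal-PHY loopback below) or "ethtool -t <ifname> online".
      */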
1546 static void qede_self_test(struct net_device *dev,
1547                            struct ethtool_test *etest, u64 *buf)
1548 {
1549         struct qede_dev *edev = netdev_priv(dev);
1550
1551         DP_VERBOSE(edev, QED_MSG_DEBUG,
1552                    "Self-test command parameters: offline = %d, external_lb = %d\n",
1553                    (etest->flags & ETH_TEST_FL_OFFLINE),
1554                    (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);
1555
1556         memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);
1557
1558         if (etest->flags & ETH_TEST_FL_OFFLINE) {
1559                 if (qede_selftest_run_loopback(edev,
1560                                                QED_LINK_LOOPBACK_INT_PHY)) {
1561                         buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
1562                         etest->flags |= ETH_TEST_FL_FAILED;
1563                 }
1564         }
1565
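             /* The remaining tests run regardless of the offline flag and are
              * delegated to the qed core through its selftest ops.
              */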
1566         if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
1567                 buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
1568                 etest->flags |= ETH_TEST_FL_FAILED;
1569         }
1570
1571         if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
1572                 buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
1573                 etest->flags |= ETH_TEST_FL_FAILED;
1574         }
1575
1576         if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
1577                 buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
1578                 etest->flags |= ETH_TEST_FL_FAILED;
1579         }
1580
1581         if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
1582                 buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
1583                 etest->flags |= ETH_TEST_FL_FAILED;
1584         }
1585
1586         if (edev->ops->common->selftest->selftest_nvram(edev->cdev)) {
1587                 buf[QEDE_ETHTOOL_NVRAM_TEST] = 1;
1588                 etest->flags |= ETH_TEST_FL_FAILED;
1589         }
1590 }
1591
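     /* Only ETHTOOL_RX_COPYBREAK is supported, e.g. set from userspace with
      * "ethtool --set-tunable <ifname> rx-copybreak <bytes>"; the value must
      * lie within [QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE].
      */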
1592 static int qede_set_tunable(struct net_device *dev,
1593                             const struct ethtool_tunable *tuna,
1594                             const void *data)
1595 {
1596         struct qede_dev *edev = netdev_priv(dev);
1597         u32 val;
1598
1599         switch (tuna->id) {
1600         case ETHTOOL_RX_COPYBREAK:
1601                 val = *(u32 *)data;
1602                 if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) {
1603                         DP_VERBOSE(edev, QED_MSG_DEBUG,
1604                                    "Invalid rx copy break value, range is [%u, %u]\n",
1605                                    QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE);
1606                         return -EINVAL;
1607                 }
1608
1609                 edev->rx_copybreak = val;
1610                 break;
1611         default:
1612                 return -EOPNOTSUPP;
1613         }
1614
1615         return 0;
1616 }
1617
1618 static int qede_get_tunable(struct net_device *dev,
1619                             const struct ethtool_tunable *tuna, void *data)
1620 {
1621         struct qede_dev *edev = netdev_priv(dev);
1622
1623         switch (tuna->id) {
1624         case ETHTOOL_RX_COPYBREAK:
1625                 *(u32 *)data = edev->rx_copybreak;
1626                 break;
1627         default:
1628                 return -EOPNOTSUPP;
1629         }
1630
1631         return 0;
1632 }
1633
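     /* ethtool callbacks for a PF. The VF table below is a strict subset:
      * link/WoL/pause/coalescing configuration, register dumps, PHY
      * identification, timestamping info and self-tests are PF-only.
      */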
1634 static const struct ethtool_ops qede_ethtool_ops = {
1635         .get_link_ksettings = qede_get_link_ksettings,
1636         .set_link_ksettings = qede_set_link_ksettings,
1637         .get_drvinfo = qede_get_drvinfo,
1638         .get_regs_len = qede_get_regs_len,
1639         .get_regs = qede_get_regs,
1640         .get_wol = qede_get_wol,
1641         .set_wol = qede_set_wol,
1642         .get_msglevel = qede_get_msglevel,
1643         .set_msglevel = qede_set_msglevel,
1644         .nway_reset = qede_nway_reset,
1645         .get_link = qede_get_link,
1646         .get_coalesce = qede_get_coalesce,
1647         .set_coalesce = qede_set_coalesce,
1648         .get_ringparam = qede_get_ringparam,
1649         .set_ringparam = qede_set_ringparam,
1650         .get_pauseparam = qede_get_pauseparam,
1651         .set_pauseparam = qede_set_pauseparam,
1652         .get_strings = qede_get_strings,
1653         .set_phys_id = qede_set_phys_id,
1654         .get_ethtool_stats = qede_get_ethtool_stats,
1655         .get_priv_flags = qede_get_priv_flags,
1656         .get_sset_count = qede_get_sset_count,
1657         .get_rxnfc = qede_get_rxnfc,
1658         .set_rxnfc = qede_set_rxnfc,
1659         .get_rxfh_indir_size = qede_get_rxfh_indir_size,
1660         .get_rxfh_key_size = qede_get_rxfh_key_size,
1661         .get_rxfh = qede_get_rxfh,
1662         .set_rxfh = qede_set_rxfh,
1663         .get_ts_info = qede_get_ts_info,
1664         .get_channels = qede_get_channels,
1665         .set_channels = qede_set_channels,
1666         .self_test = qede_self_test,
1667         .get_tunable = qede_get_tunable,
1668         .set_tunable = qede_set_tunable,
1669 };
1670
1671 static const struct ethtool_ops qede_vf_ethtool_ops = {
1672         .get_link_ksettings = qede_get_link_ksettings,
1673         .get_drvinfo = qede_get_drvinfo,
1674         .get_msglevel = qede_get_msglevel,
1675         .set_msglevel = qede_set_msglevel,
1676         .get_link = qede_get_link,
1677         .get_ringparam = qede_get_ringparam,
1678         .set_ringparam = qede_set_ringparam,
1679         .get_strings = qede_get_strings,
1680         .get_ethtool_stats = qede_get_ethtool_stats,
1681         .get_priv_flags = qede_get_priv_flags,
1682         .get_sset_count = qede_get_sset_count,
1683         .get_rxnfc = qede_get_rxnfc,
1684         .set_rxnfc = qede_set_rxnfc,
1685         .get_rxfh_indir_size = qede_get_rxfh_indir_size,
1686         .get_rxfh_key_size = qede_get_rxfh_key_size,
1687         .get_rxfh = qede_get_rxfh,
1688         .set_rxfh = qede_set_rxfh,
1689         .get_channels = qede_get_channels,
1690         .set_channels = qede_set_channels,
1691         .get_tunable = qede_get_tunable,
1692         .set_tunable = qede_set_tunable,
1693 };
1694
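     /* Attach the ethtool operations table matching the function type (PF or
      * VF) of this device instance.
      */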
1695 void qede_set_ethtool_ops(struct net_device *dev)
1696 {
1697         struct qede_dev *edev = netdev_priv(dev);
1698
1699         if (IS_VF(edev))
1700                 dev->ethtool_ops = &qede_vf_ethtool_ops;
1701         else
1702                 dev->ethtool_ops = &qede_ethtool_ops;
1703 }