drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/ethtool.h>
19 #include <linux/netdevice.h>
20 #include <linux/net_tstamp.h>
21 #include <linux/pci.h>
22 #include "liquidio_common.h"
23 #include "octeon_droq.h"
24 #include "octeon_iq.h"
25 #include "response_manager.h"
26 #include "octeon_device.h"
27 #include "octeon_nic.h"
28 #include "octeon_main.h"
29 #include "octeon_network.h"
30 #include "cn66xx_regs.h"
31 #include "cn66xx_device.h"
32 #include "cn23xx_pf_device.h"
33 #include "cn23xx_vf_device.h"
34
35 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
36
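/* Firmware response carrying the current interrupt moderation settings. */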
37 struct oct_intrmod_resp {
38         u64     rh;
39         struct oct_intrmod_cfg intrmod;
40         u64     status;
41 };
42
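/* Firmware response for an MDIO clause-45 access request
 * (see octnet_mdio45_access() below).
 */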
43 struct oct_mdio_cmd_resp {
44         u64 rh;
45         struct oct_mdio_cmd resp;
46         u64 status;
47 };
48
49 #define OCT_MDIO45_RESP_SIZE   (sizeof(struct oct_mdio_cmd_resp))
50
51 /* Octeon's interface mode of operation */
52 enum {
53         INTERFACE_MODE_DISABLED,
54         INTERFACE_MODE_RGMII,
55         INTERFACE_MODE_GMII,
56         INTERFACE_MODE_SPI,
57         INTERFACE_MODE_PCIE,
58         INTERFACE_MODE_XAUI,
59         INTERFACE_MODE_SGMII,
60         INTERFACE_MODE_PICMG,
61         INTERFACE_MODE_NPI,
62         INTERFACE_MODE_LOOP,
63         INTERFACE_MODE_SRIO,
64         INTERFACE_MODE_ILK,
65         INTERFACE_MODE_RXAUI,
66         INTERFACE_MODE_QSGMII,
67         INTERFACE_MODE_AGL,
68         INTERFACE_MODE_XLAUI,
69         INTERFACE_MODE_XFI,
70         INTERFACE_MODE_10G_KR,
71         INTERFACE_MODE_40G_KR4,
72         INTERFACE_MODE_MIXED,
73 };
74
75 #define OCT_ETHTOOL_REGDUMP_LEN  4096
76 #define OCT_ETHTOOL_REGDUMP_LEN_23XX  (4096 * 11)
77 #define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF  (4096 * 2)
78 #define OCT_ETHTOOL_REGSVER  1
79
80 /* statistics of PF */
81 static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
82         "rx_packets",
83         "tx_packets",
84         "rx_bytes",
85         "tx_bytes",
86         "rx_errors",
87         "tx_errors",
88         "rx_dropped",
89         "tx_dropped",
90
91         "tx_total_sent",
92         "tx_total_fwd",
93         "tx_err_pko",
94         "tx_err_pki",
95         "tx_err_link",
96         "tx_err_drop",
97
98         "tx_tso",
99         "tx_tso_packets",
100         "tx_tso_err",
101         "tx_vxlan",
102
103         "tx_mcast",
104         "tx_bcast",
105
106         "mac_tx_total_pkts",
107         "mac_tx_total_bytes",
108         "mac_tx_mcast_pkts",
109         "mac_tx_bcast_pkts",
110         "mac_tx_ctl_packets",
111         "mac_tx_total_collisions",
112         "mac_tx_one_collision",
113         "mac_tx_multi_collision",
114         "mac_tx_max_collision_fail",
115         "mac_tx_max_deferral_fail",
116         "mac_tx_fifo_err",
117         "mac_tx_runts",
118
119         "rx_total_rcvd",
120         "rx_total_fwd",
121         "rx_mcast",
122         "rx_bcast",
123         "rx_jabber_err",
124         "rx_l2_err",
125         "rx_frame_err",
126         "rx_err_pko",
127         "rx_err_link",
128         "rx_err_drop",
129
130         "rx_vxlan",
131         "rx_vxlan_err",
132
133         "rx_lro_pkts",
134         "rx_lro_bytes",
135         "rx_total_lro",
136
137         "rx_lro_aborts",
138         "rx_lro_aborts_port",
139         "rx_lro_aborts_seq",
140         "rx_lro_aborts_tsval",
141         "rx_lro_aborts_timer",
142         "rx_fwd_rate",
143
144         "mac_rx_total_rcvd",
145         "mac_rx_bytes",
146         "mac_rx_total_bcst",
147         "mac_rx_total_mcst",
148         "mac_rx_runts",
149         "mac_rx_ctl_packets",
150         "mac_rx_fifo_err",
151         "mac_rx_dma_drop",
152         "mac_rx_fcs_err",
153
154         "link_state_changes",
155 };
156
157 /* statistics of VF */
158 static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
159         "rx_packets",
160         "tx_packets",
161         "rx_bytes",
162         "tx_bytes",
163         "rx_errors",
164         "tx_errors",
165         "rx_dropped",
166         "tx_dropped",
167         "rx_mcast",
168         "tx_mcast",
169         "rx_bcast",
170         "tx_bcast",
171         "link_state_changes",
172 };
173
174 /* statistics of host tx queue */
175 static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
176         "packets",
177         "bytes",
178         "dropped",
179         "iq_busy",
180         "sgentry_sent",
181
182         "fw_instr_posted",
183         "fw_instr_processed",
184         "fw_instr_dropped",
185         "fw_bytes_sent",
186
187         "tso",
188         "vxlan",
189         "txq_restart",
190 };
191
192 /* statistics of host rx queue */
193 static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
194         "packets",
195         "bytes",
196         "dropped",
197         "dropped_nomem",
198         "dropped_toomany",
199         "fw_dropped",
200         "fw_pkts_received",
201         "fw_bytes_received",
202         "fw_dropped_nodispatch",
203
204         "vxlan",
205         "buffer_alloc_failure",
206 };
207
208 /* LiquidIO driver private flags */
209 static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
210 };
211
212 #define OCTNIC_NCMD_AUTONEG_ON  0x1
213 #define OCTNIC_NCMD_PHY_ON      0x2
214
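/* Report link settings for ethtool: port type, autoneg, supported and
 * advertised link modes, and current speed/duplex, based on the PHY type
 * and interface mode reported by firmware in lio->linfo.
 */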
215 static int lio_get_link_ksettings(struct net_device *netdev,
216                                   struct ethtool_link_ksettings *ecmd)
217 {
218         struct lio *lio = GET_LIO(netdev);
219         struct octeon_device *oct = lio->oct_dev;
220         struct oct_link_info *linfo;
221
222         linfo = &lio->linfo;
223
224         ethtool_link_ksettings_zero_link_mode(ecmd, supported);
225         ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
226
227         switch (linfo->link.s.phy_type) {
228         case LIO_PHY_PORT_TP:
229                 ecmd->base.port = PORT_TP;
230                 ecmd->base.autoneg = AUTONEG_DISABLE;
231                 ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
232                 ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
233                 ethtool_link_ksettings_add_link_mode(ecmd, supported,
234                                                      10000baseT_Full);
235
236                 ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
237                 ethtool_link_ksettings_add_link_mode(ecmd, advertising,
238                                                      10000baseT_Full);
239
240                 break;
241
242         case LIO_PHY_PORT_FIBRE:
243                 if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
244                     linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
245                     linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
246                     linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
247                         dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n");
248                         ecmd->base.transceiver = XCVR_EXTERNAL;
249                 } else {
250                         dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n",
251                                 linfo->link.s.if_mode);
252                 }
253
254                 ecmd->base.port = PORT_FIBRE;
255                 ecmd->base.autoneg = AUTONEG_DISABLE;
256                 ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
257
258                 ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
259                 ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
260                 if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
261                     oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
262                         if (OCTEON_CN23XX_PF(oct)) {
263                                 ethtool_link_ksettings_add_link_mode
264                                         (ecmd, supported, 25000baseSR_Full);
265                                 ethtool_link_ksettings_add_link_mode
266                                         (ecmd, supported, 25000baseKR_Full);
267                                 ethtool_link_ksettings_add_link_mode
268                                         (ecmd, supported, 25000baseCR_Full);
269
270                                 if (oct->no_speed_setting == 0)  {
271                                         ethtool_link_ksettings_add_link_mode
272                                                 (ecmd, supported,
273                                                  10000baseSR_Full);
274                                         ethtool_link_ksettings_add_link_mode
275                                                 (ecmd, supported,
276                                                  10000baseKR_Full);
277                                         ethtool_link_ksettings_add_link_mode
278                                                 (ecmd, supported,
279                                                  10000baseCR_Full);
280                                 }
281
282                                 if (oct->no_speed_setting == 0) {
283                                         liquidio_get_speed(lio);
284                                         liquidio_get_fec(lio);
285                                 } else {
286                                         oct->speed_setting = 25;
287                                 }
288
289                                 if (oct->speed_setting == 10) {
290                                         ethtool_link_ksettings_add_link_mode
291                                                 (ecmd, advertising,
292                                                  10000baseSR_Full);
293                                         ethtool_link_ksettings_add_link_mode
294                                                 (ecmd, advertising,
295                                                  10000baseKR_Full);
296                                         ethtool_link_ksettings_add_link_mode
297                                                 (ecmd, advertising,
298                                                  10000baseCR_Full);
299                                 }
300                                 if (oct->speed_setting == 25) {
301                                         ethtool_link_ksettings_add_link_mode
302                                                 (ecmd, advertising,
303                                                  25000baseSR_Full);
304                                         ethtool_link_ksettings_add_link_mode
305                                                 (ecmd, advertising,
306                                                  25000baseKR_Full);
307                                         ethtool_link_ksettings_add_link_mode
308                                                 (ecmd, advertising,
309                                                  25000baseCR_Full);
310                                 }
311
312                                 if (oct->no_speed_setting)
313                                         break;
314
315                                 ethtool_link_ksettings_add_link_mode
316                                         (ecmd, supported, FEC_RS);
317                                 ethtool_link_ksettings_add_link_mode
318                                         (ecmd, supported, FEC_NONE);
319                                         /*FEC_OFF*/
320                                 if (oct->props[lio->ifidx].fec == 1) {
321                                         /* ETHTOOL_FEC_RS */
322                                         ethtool_link_ksettings_add_link_mode
323                                                 (ecmd, advertising, FEC_RS);
324                                 } else {
325                                         /* ETHTOOL_FEC_OFF */
326                                         ethtool_link_ksettings_add_link_mode
327                                                 (ecmd, advertising, FEC_NONE);
328                                 }
329                         } else { /* VF */
330                                 if (linfo->link.s.speed == 10000) {
331                                         ethtool_link_ksettings_add_link_mode
332                                                 (ecmd, supported,
333                                                  10000baseSR_Full);
334                                         ethtool_link_ksettings_add_link_mode
335                                                 (ecmd, supported,
336                                                  10000baseKR_Full);
337                                         ethtool_link_ksettings_add_link_mode
338                                                 (ecmd, supported,
339                                                  10000baseCR_Full);
340
341                                         ethtool_link_ksettings_add_link_mode
342                                                 (ecmd, advertising,
343                                                  10000baseSR_Full);
344                                         ethtool_link_ksettings_add_link_mode
345                                                 (ecmd, advertising,
346                                                  10000baseKR_Full);
347                                         ethtool_link_ksettings_add_link_mode
348                                                 (ecmd, advertising,
349                                                  10000baseCR_Full);
350                                 }
351
352                                 if (linfo->link.s.speed == 25000) {
353                                         ethtool_link_ksettings_add_link_mode
354                                                 (ecmd, supported,
355                                                  25000baseSR_Full);
356                                         ethtool_link_ksettings_add_link_mode
357                                                 (ecmd, supported,
358                                                  25000baseKR_Full);
359                                         ethtool_link_ksettings_add_link_mode
360                                                 (ecmd, supported,
361                                                  25000baseCR_Full);
362
363                                         ethtool_link_ksettings_add_link_mode
364                                                 (ecmd, advertising,
365                                                  25000baseSR_Full);
366                                         ethtool_link_ksettings_add_link_mode
367                                                 (ecmd, advertising,
368                                                  25000baseKR_Full);
369                                         ethtool_link_ksettings_add_link_mode
370                                                 (ecmd, advertising,
371                                                  25000baseCR_Full);
372                                 }
373                         }
374                 } else {
375                         ethtool_link_ksettings_add_link_mode(ecmd, supported,
376                                                              10000baseT_Full);
377                         ethtool_link_ksettings_add_link_mode(ecmd, advertising,
378                                                              10000baseT_Full);
379                 }
380                 break;
381         }
382
383         if (linfo->link.s.link_up) {
384                 ecmd->base.speed = linfo->link.s.speed;
385                 ecmd->base.duplex = linfo->link.s.duplex;
386         } else {
387                 ecmd->base.speed = SPEED_UNKNOWN;
388                 ecmd->base.duplex = DUPLEX_UNKNOWN;
389         }
390
391         return 0;
392 }
393
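/* Set the port speed (10G or 25G) on 25G-capable CN23xx NICs. Duplex and
 * autoneg are fixed and cannot be changed.
 */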
394 static int lio_set_link_ksettings(struct net_device *netdev,
395                                   const struct ethtool_link_ksettings *ecmd)
396 {
397         const int speed = ecmd->base.speed;
398         struct lio *lio = GET_LIO(netdev);
399         struct oct_link_info *linfo;
400         struct octeon_device *oct;
401
402         oct = lio->oct_dev;
403
404         linfo = &lio->linfo;
405
406         if (!(oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
407               oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID))
408                 return -EOPNOTSUPP;
409
410         if (oct->no_speed_setting) {
411                 dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n",
412                         __func__);
413                 return -EOPNOTSUPP;
414         }
415
416         if ((ecmd->base.duplex != DUPLEX_UNKNOWN &&
417              ecmd->base.duplex != linfo->link.s.duplex) ||
418              ecmd->base.autoneg != AUTONEG_DISABLE ||
419             (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 &&
420              ecmd->base.speed != SPEED_UNKNOWN))
421                 return -EOPNOTSUPP;
422
423         if ((oct->speed_boot == speed / 1000) &&
424             oct->speed_boot == oct->speed_setting)
425                 return 0;
426
427         liquidio_set_speed(lio, speed / 1000);
428
429         dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n",
430                 oct->speed_setting);
431
432         return 0;
433 }
434
435 static void
436 lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
437 {
438         struct lio *lio;
439         struct octeon_device *oct;
440
441         lio = GET_LIO(netdev);
442         oct = lio->oct_dev;
443
444         memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
445         strcpy(drvinfo->driver, "liquidio");
446         strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
447                 ETHTOOL_FWVERS_LEN);
448         strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
449 }
450
451 static void
452 lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
453 {
454         struct octeon_device *oct;
455         struct lio *lio;
456
457         lio = GET_LIO(netdev);
458         oct = lio->oct_dev;
459
460         memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
461         strcpy(drvinfo->driver, "liquidio_vf");
462         strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
463                 ETHTOOL_FWVERS_LEN);
464         strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
465 }
466
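/* Tell firmware the new rx/tx queue count via an OCTNET_CMD_QUEUE_COUNT_CTL
 * control command.
 */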
467 static int
468 lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues)
469 {
470         struct lio *lio = GET_LIO(netdev);
471         struct octeon_device *oct = lio->oct_dev;
472         struct octnic_ctrl_pkt nctrl;
473         int ret = 0;
474
475         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
476
477         nctrl.ncmd.u64 = 0;
478         nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL;
479         nctrl.ncmd.s.param1 = num_queues;
480         nctrl.ncmd.s.param2 = num_queues;
481         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
482         nctrl.netpndev = (u64)netdev;
483         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
484
485         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
486         if (ret) {
487                 dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n",
488                         ret);
489                 return -1;
490         }
491
492         return 0;
493 }
494
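/* Report the maximum and currently configured queue counts for ethtool -l. */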
495 static void
496 lio_ethtool_get_channels(struct net_device *dev,
497                          struct ethtool_channels *channel)
498 {
499         struct lio *lio = GET_LIO(dev);
500         struct octeon_device *oct = lio->oct_dev;
501         u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
502         u32 combined_count = 0, max_combined = 0;
503
504         if (OCTEON_CN6XXX(oct)) {
505                 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
506
507                 max_rx = CFG_GET_OQ_MAX_Q(conf6x);
508                 max_tx = CFG_GET_IQ_MAX_Q(conf6x);
509                 rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
510                 tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
511         } else if (OCTEON_CN23XX_PF(oct)) {
512                 if (oct->sriov_info.sriov_enabled) {
513                         max_combined = lio->linfo.num_txpciq;
514                 } else {
515                         struct octeon_config *conf23_pf =
516                                 CHIP_CONF(oct, cn23xx_pf);
517
518                         max_combined = CFG_GET_IQ_MAX_Q(conf23_pf);
519                 }
520                 combined_count = oct->num_iqs;
521         } else if (OCTEON_CN23XX_VF(oct)) {
522                 u64 reg_val = 0ULL;
523                 u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
524
525                 reg_val = octeon_read_csr64(oct, ctrl);
526                 reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
527                 max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
528                 combined_count = oct->num_iqs;
529         }
530
531         channel->max_rx = max_rx;
532         channel->max_tx = max_tx;
533         channel->max_combined = max_combined;
534         channel->rx_count = rx_count;
535         channel->tx_count = tx_count;
536         channel->combined_count = combined_count;
537 }
538
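/* Free the existing MSI-X vectors and IRQs and set them up again for the
 * new number of I/O queues.
 */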
539 static int
540 lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
541 {
542         struct msix_entry *msix_entries;
543         int num_msix_irqs = 0;
544         int i;
545
546         if (!oct->msix_on)
547                 return 0;
548
549         /* Disable Octeon device interrupts now. No more interrupts will
550          * be raised by Octeon while the MSI-X vectors are reallocated.
551          */
552         oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
553
554         if (oct->msix_on) {
555                 if (OCTEON_CN23XX_PF(oct))
556                         num_msix_irqs = oct->num_msix_irqs - 1;
557                 else if (OCTEON_CN23XX_VF(oct))
558                         num_msix_irqs = oct->num_msix_irqs;
559
560                 msix_entries = (struct msix_entry *)oct->msix_entries;
561                 for (i = 0; i < num_msix_irqs; i++) {
562                         if (oct->ioq_vector[i].vector) {
563                                 /* clear the affinity_cpumask */
564                                 irq_set_affinity_hint(msix_entries[i].vector,
565                                                       NULL);
566                                 free_irq(msix_entries[i].vector,
567                                          &oct->ioq_vector[i]);
568                                 oct->ioq_vector[i].vector = 0;
569                         }
570                 }
571
572                 /* the non-ioq vector's argument is the oct struct */
573                 if (OCTEON_CN23XX_PF(oct))
574                         free_irq(msix_entries[i].vector, oct);
575
576                 pci_disable_msix(oct->pci_dev);
577                 kfree(oct->msix_entries);
578                 oct->msix_entries = NULL;
579         }
580
581         kfree(oct->irq_name_storage);
582         oct->irq_name_storage = NULL;
583
584         if (octeon_allocate_ioq_vector(oct, num_ioqs)) {
585                 dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
586                 return -1;
587         }
588
589         if (octeon_setup_interrupt(oct, num_ioqs)) {
590                 dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
591                 return -1;
592         }
593
594         /* Enable Octeon device interrupts */
595         oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
596
597         return 0;
598 }
599
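/* Change the number of combined queues (ethtool -L). Requires firmware
 * 1.6.1 or later and triggers a full queue reset.
 */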
600 static int
601 lio_ethtool_set_channels(struct net_device *dev,
602                          struct ethtool_channels *channel)
603 {
604         u32 combined_count, max_combined;
605         struct lio *lio = GET_LIO(dev);
606         struct octeon_device *oct = lio->oct_dev;
607         int stopped = 0;
608
609         if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) {
610                 dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n");
611                 return -EINVAL;
612         }
613
614         if (!channel->combined_count || channel->other_count ||
615             channel->rx_count || channel->tx_count)
616                 return -EINVAL;
617
618         combined_count = channel->combined_count;
619
620         if (OCTEON_CN23XX_PF(oct)) {
621                 if (oct->sriov_info.sriov_enabled) {
622                         max_combined = lio->linfo.num_txpciq;
623                 } else {
624                         struct octeon_config *conf23_pf =
625                                 CHIP_CONF(oct,
626                                           cn23xx_pf);
627
628                         max_combined =
629                                 CFG_GET_IQ_MAX_Q(conf23_pf);
630                 }
631         } else if (OCTEON_CN23XX_VF(oct)) {
632                 u64 reg_val = 0ULL;
633                 u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
634
635                 reg_val = octeon_read_csr64(oct, ctrl);
636                 reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
637                 max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
638         } else {
639                 return -EINVAL;
640         }
641
642         if (combined_count > max_combined || combined_count < 1)
643                 return -EINVAL;
644
645         if (combined_count == oct->num_iqs)
646                 return 0;
647
648         ifstate_set(lio, LIO_IFSTATE_RESETTING);
649
650         if (netif_running(dev)) {
651                 dev->netdev_ops->ndo_stop(dev);
652                 stopped = 1;
653         }
654
655         if (lio_reset_queues(dev, combined_count))
656                 return -EINVAL;
657
658         if (stopped)
659                 dev->netdev_ops->ndo_open(dev);
660
661         ifstate_reset(lio, LIO_IFSTATE_RESETTING);
662
663         return 0;
664 }
665
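/* The "EEPROM" exposed through ethtool is a short text string containing
 * the board name, serial number and revision.
 */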
666 static int lio_get_eeprom_len(struct net_device *netdev)
667 {
668         u8 buf[192];
669         struct lio *lio = GET_LIO(netdev);
670         struct octeon_device *oct_dev = lio->oct_dev;
671         struct octeon_board_info *board_info;
672         int len;
673
674         board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
675         len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
676                       board_info->name, board_info->serial_number,
677                       board_info->major, board_info->minor);
678
679         return len;
680 }
681
682 static int
683 lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
684                u8 *bytes)
685 {
686         struct lio *lio = GET_LIO(netdev);
687         struct octeon_device *oct_dev = lio->oct_dev;
688         struct octeon_board_info *board_info;
689
690         if (eeprom->offset)
691                 return -EINVAL;
692
693         eeprom->magic = oct_dev->pci_dev->vendor;
694         board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
695         sprintf((char *)bytes,
696                 "boardname:%s serialnum:%s maj:%lld min:%lld\n",
697                 board_info->name, board_info->serial_number,
698                 board_info->major, board_info->minor);
699
700         return 0;
701 }
702
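/* Drive a GPIO pin on the adapter via an OCTNET_CMD_GPIO_ACCESS control
 * command; used by lio_set_phys_id() to drive the Vitesse PHY GPIO on
 * CN66xx boards.
 */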
703 static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
704 {
705         struct lio *lio = GET_LIO(netdev);
706         struct octeon_device *oct = lio->oct_dev;
707         struct octnic_ctrl_pkt nctrl;
708         int ret = 0;
709
710         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
711
712         nctrl.ncmd.u64 = 0;
713         nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
714         nctrl.ncmd.s.param1 = addr;
715         nctrl.ncmd.s.param2 = val;
716         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
717         nctrl.netpndev = (u64)netdev;
718         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
719
720         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
721         if (ret) {
722                 dev_err(&oct->pci_dev->dev,
723                         "Failed to configure gpio value, ret=%d\n", ret);
724                 return -EINVAL;
725         }
726
727         return 0;
728 }
729
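/* Turn the port identification LED on or off via an OCTNET_CMD_ID_ACTIVE
 * control command; used by lio_set_phys_id() on CN23xx.
 */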
730 static int octnet_id_active(struct net_device *netdev, int val)
731 {
732         struct lio *lio = GET_LIO(netdev);
733         struct octeon_device *oct = lio->oct_dev;
734         struct octnic_ctrl_pkt nctrl;
735         int ret = 0;
736
737         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
738
739         nctrl.ncmd.u64 = 0;
740         nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
741         nctrl.ncmd.s.param1 = val;
742         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
743         nctrl.netpndev = (u64)netdev;
744         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
745
746         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
747         if (ret) {
748                 dev_err(&oct->pci_dev->dev,
749                         "Failed to configure ID active value, ret=%d\n", ret);
750                 return -EINVAL;
751         }
752
753         return 0;
754 }
755
756 /* This routine provides PHY register access
757  * via MDIO clause 45.
758  */
759 static int
760 octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
761 {
762         struct octeon_device *oct_dev = lio->oct_dev;
763         struct octeon_soft_command *sc;
764         struct oct_mdio_cmd_resp *mdio_cmd_rsp;
765         struct oct_mdio_cmd *mdio_cmd;
766         int retval = 0;
767
768         sc = (struct octeon_soft_command *)
769                 octeon_alloc_soft_command(oct_dev,
770                                           sizeof(struct oct_mdio_cmd),
771                                           sizeof(struct oct_mdio_cmd_resp), 0);
772
773         if (!sc)
774                 return -ENOMEM;
775
776         mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
777         mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
778
779         mdio_cmd->op = op;
780         mdio_cmd->mdio_addr = loc;
781         if (op)
782                 mdio_cmd->value1 = *value;
783         octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
784
785         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
786
787         octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
788                                     0, 0, 0);
789
790         init_completion(&sc->complete);
791         sc->sc_status = OCTEON_REQUEST_PENDING;
792
793         retval = octeon_send_soft_command(oct_dev, sc);
794         if (retval == IQ_SEND_FAILED) {
795                 dev_err(&oct_dev->pci_dev->dev,
796                         "octnet_mdio45_access instruction failed status: %x\n",
797                         retval);
798                 octeon_free_soft_command(oct_dev, sc);
799                 return -EBUSY;
800         } else {
801                 /* Sleep on a wait queue till the cond flag indicates that the
802                  * response arrived
803                  */
804                 retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
805                 if (retval)
806                         return retval;
807
808                 retval = mdio_cmd_rsp->status;
809                 if (retval) {
810                         dev_err(&oct_dev->pci_dev->dev,
811                                 "octnet mdio45 access failed: %x\n", retval);
812                         WRITE_ONCE(sc->caller_is_done, true);
813                         return -EBUSY;
814                 }
815
816                 octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
817                                     sizeof(struct oct_mdio_cmd) / 8);
818
819                 if (!op)
820                         *value = mdio_cmd_rsp->resp.value1;
821
822                 WRITE_ONCE(sc->caller_is_done, true);
823         }
824
825         return retval;
826 }
827
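/* ethtool -p handler: blink or beacon the port LED. Uses PHY GPIO on
 * CN66xx, MDIO LED/beacon registers on CN68xx, and the firmware ID-active
 * command on CN23xx.
 */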
828 static int lio_set_phys_id(struct net_device *netdev,
829                            enum ethtool_phys_id_state state)
830 {
831         struct lio *lio = GET_LIO(netdev);
832         struct octeon_device *oct = lio->oct_dev;
833         struct oct_link_info *linfo;
834         int value, ret;
835         u32 cur_ver;
836
837         linfo = &lio->linfo;
838         cur_ver = OCT_FW_VER(oct->fw_info.ver.maj,
839                              oct->fw_info.ver.min,
840                              oct->fw_info.ver.rev);
841
842         switch (state) {
843         case ETHTOOL_ID_ACTIVE:
844                 if (oct->chip_id == OCTEON_CN66XX) {
845                         octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
846                                            VITESSE_PHY_GPIO_DRIVEON);
847                         return 2;
848
849                 } else if (oct->chip_id == OCTEON_CN68XX) {
850                         /* Save the current LED settings */
851                         ret = octnet_mdio45_access(lio, 0,
852                                                    LIO68XX_LED_BEACON_ADDR,
853                                                    &lio->phy_beacon_val);
854                         if (ret)
855                                 return ret;
856
857                         ret = octnet_mdio45_access(lio, 0,
858                                                    LIO68XX_LED_CTRL_ADDR,
859                                                    &lio->led_ctrl_val);
860                         if (ret)
861                                 return ret;
862
863                         /* Configure Beacon values */
864                         value = LIO68XX_LED_BEACON_CFGON;
865                         ret = octnet_mdio45_access(lio, 1,
866                                                    LIO68XX_LED_BEACON_ADDR,
867                                                    &value);
868                         if (ret)
869                                 return ret;
870
871                         value = LIO68XX_LED_CTRL_CFGON;
872                         ret = octnet_mdio45_access(lio, 1,
873                                                    LIO68XX_LED_CTRL_ADDR,
874                                                    &value);
875                         if (ret)
876                                 return ret;
877                 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
878                         octnet_id_active(netdev, LED_IDENTIFICATION_ON);
879                         if (linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
880                             cur_ver > OCT_FW_VER(1, 7, 2))
881                                 return 2;
882                         else
883                                 return 0;
884                 } else {
885                         return -EINVAL;
886                 }
887                 break;
888
889         case ETHTOOL_ID_ON:
890                 if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
891                     linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
892                     cur_ver > OCT_FW_VER(1, 7, 2))
893                         octnet_id_active(netdev, LED_IDENTIFICATION_ON);
894                 else if (oct->chip_id == OCTEON_CN66XX)
895                         octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
896                                            VITESSE_PHY_GPIO_HIGH);
897                 else
898                         return -EINVAL;
899
900                 break;
901
902         case ETHTOOL_ID_OFF:
903                 if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
904                     linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
905                     cur_ver > OCT_FW_VER(1, 7, 2))
906                         octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
907                 else if (oct->chip_id == OCTEON_CN66XX)
908                         octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
909                                            VITESSE_PHY_GPIO_LOW);
910                 else
911                         return -EINVAL;
912
913                 break;
914
915         case ETHTOOL_ID_INACTIVE:
916                 if (oct->chip_id == OCTEON_CN66XX) {
917                         octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
918                                            VITESSE_PHY_GPIO_DRIVEOFF);
919                 } else if (oct->chip_id == OCTEON_CN68XX) {
920                         /* Restore LED settings */
921                         ret = octnet_mdio45_access(lio, 1,
922                                                    LIO68XX_LED_CTRL_ADDR,
923                                                    &lio->led_ctrl_val);
924                         if (ret)
925                                 return ret;
926
927                         ret = octnet_mdio45_access(lio, 1,
928                                                    LIO68XX_LED_BEACON_ADDR,
929                                                    &lio->phy_beacon_val);
930                         if (ret)
931                                 return ret;
932                 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
933                         octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
934
935                         return 0;
936                 } else {
937                         return -EINVAL;
938                 }
939                 break;
940
941         default:
942                 return -EINVAL;
943         }
944
945         return 0;
946 }
947
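/* Report the current and maximum rx/tx descriptor ring sizes. */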
948 static void
949 lio_ethtool_get_ringparam(struct net_device *netdev,
950                           struct ethtool_ringparam *ering)
951 {
952         struct lio *lio = GET_LIO(netdev);
953         struct octeon_device *oct = lio->oct_dev;
954         u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
955             rx_pending = 0;
956
957         if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
958                 return;
959
960         if (OCTEON_CN6XXX(oct)) {
961                 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
962
963                 tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
964                 rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
965                 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
966                 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
967         } else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
968                 tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
969                 rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
970                 rx_pending = oct->droq[0]->max_count;
971                 tx_pending = oct->instr_queue[0]->max_count;
972         }
973
974         ering->tx_pending = tx_pending;
975         ering->tx_max_pending = tx_max_pending;
976         ering->rx_pending = rx_pending;
977         ering->rx_max_pending = rx_max_pending;
978         ering->rx_mini_pending = 0;
979         ering->rx_jumbo_pending = 0;
980         ering->rx_mini_max_pending = 0;
981         ering->rx_jumbo_max_pending = 0;
982 }
983
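/* Send an OPCODE_NIC_QCOUNT_UPDATE soft command to firmware and refresh the
 * local queue configuration (txpciq/rxpciq, queue numbers) from its reply.
 */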
984 static int lio_23xx_reconfigure_queue_count(struct lio *lio)
985 {
986         struct octeon_device *oct = lio->oct_dev;
987         u32 resp_size, data_size;
988         struct liquidio_if_cfg_resp *resp;
989         struct octeon_soft_command *sc;
990         union oct_nic_if_cfg if_cfg;
991         struct lio_version *vdata;
992         u32 ifidx_or_pfnum;
993         int retval;
994         int j;
995
996         resp_size = sizeof(struct liquidio_if_cfg_resp);
997         data_size = sizeof(struct lio_version);
998         sc = (struct octeon_soft_command *)
999                 octeon_alloc_soft_command(oct, data_size,
1000                                           resp_size, 0);
1001         if (!sc) {
1002                 dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n",
1003                         __func__);
1004                 return -1;
1005         }
1006
1007         resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1008         vdata = (struct lio_version *)sc->virtdptr;
1009
1010         vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
1011         vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
1012         vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
1013
1014         ifidx_or_pfnum = oct->pf_num;
1015
1016         if_cfg.u64 = 0;
1017         if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings;
1018         if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings;
1019         if_cfg.s.base_queue = oct->sriov_info.pf_srn;
1020         if_cfg.s.gmx_port_id = oct->pf_num;
1021
1022         sc->iq_no = 0;
1023         octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1024                                     OPCODE_NIC_QCOUNT_UPDATE, 0,
1025                                     if_cfg.u64, 0);
1026
1027         init_completion(&sc->complete);
1028         sc->sc_status = OCTEON_REQUEST_PENDING;
1029
1030         retval = octeon_send_soft_command(oct, sc);
1031         if (retval == IQ_SEND_FAILED) {
1032                 dev_err(&oct->pci_dev->dev,
1033                         "Sending iq/oq config failed status: %x\n",
1034                         retval);
1035                 octeon_free_soft_command(oct, sc);
1036                 return -EIO;
1037         }
1038
1039         retval = wait_for_sc_completion_timeout(oct, sc, 0);
1040         if (retval)
1041                 return retval;
1042
1043         retval = resp->status;
1044         if (retval) {
1045                 dev_err(&oct->pci_dev->dev,
1046                         "iq/oq config failed: %x\n", retval);
1047                 WRITE_ONCE(sc->caller_is_done, true);
1048                 return -1;
1049         }
1050
1051         octeon_swap_8B_data((u64 *)(&resp->cfg_info),
1052                             (sizeof(struct liquidio_if_cfg_info)) >> 3);
1053
1054         lio->ifidx = ifidx_or_pfnum;
1055         lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask);
1056         lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask);
1057         for (j = 0; j < lio->linfo.num_rxpciq; j++) {
1058                 lio->linfo.rxpciq[j].u64 =
1059                         resp->cfg_info.linfo.rxpciq[j].u64;
1060         }
1061
1062         for (j = 0; j < lio->linfo.num_txpciq; j++) {
1063                 lio->linfo.txpciq[j].u64 =
1064                         resp->cfg_info.linfo.txpciq[j].u64;
1065         }
1066
1067         lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
1068         lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
1069         lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
1070         lio->txq = lio->linfo.txpciq[0].s.q_no;
1071         lio->rxq = lio->linfo.rxpciq[0].s.q_no;
1072
1073         dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n",
1074                  lio->linfo.num_rxpciq);
1075
1076         WRITE_ONCE(sc->caller_is_done, true);
1077
1078         return 0;
1079 }
1080
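/* Tear down and recreate the I/O queues, either with a new queue count or
 * with new descriptor counts (queue_count_update selects which).
 */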
1081 static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
1082 {
1083         struct lio *lio = GET_LIO(netdev);
1084         struct octeon_device *oct = lio->oct_dev;
1085         int i, queue_count_update = 0;
1086         struct napi_struct *napi, *n;
1087         int ret;
1088
1089         schedule_timeout_uninterruptible(msecs_to_jiffies(100));
1090
1091         if (wait_for_pending_requests(oct))
1092                 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
1093
1094         if (lio_wait_for_instr_fetch(oct))
1095                 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
1096
1097         if (octeon_set_io_queues_off(oct)) {
1098                 dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n");
1099                 return -1;
1100         }
1101
1102         /* Disable the input and output queues now. No more packets will
1103          * arrive from Octeon.
1104          */
1105         oct->fn_list.disable_io_queues(oct);
1106         /* Delete NAPI */
1107         list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1108                 netif_napi_del(napi);
1109
1110         if (num_qs != oct->num_iqs) {
1111                 ret = netif_set_real_num_rx_queues(netdev, num_qs);
1112                 if (ret) {
1113                         dev_err(&oct->pci_dev->dev,
1114                                 "Setting real number rx failed\n");
1115                         return ret;
1116                 }
1117
1118                 ret = netif_set_real_num_tx_queues(netdev, num_qs);
1119                 if (ret) {
1120                         dev_err(&oct->pci_dev->dev,
1121                                 "Setting real number tx failed\n");
1122                         return ret;
1123                 }
1124
1125                 /* The value of queue_count_update decides whether it is the
1126                  * queue count or the descriptor count that is being
1127                  * re-configured.
1128                  */
1129                 queue_count_update = 1;
1130         }
1131
1132         /* Re-configuration of queues can happen in two scenarios: SRIOV enabled
1133          * and SRIOV disabled. A few things, like recreating queue zero and
1134          * resetting glists and IRQs, are required for both. For the latter, some
1135          * more steps, like updating sriov_info for the octeon device, are needed.
1136          */
1137         if (queue_count_update) {
1138                 cleanup_rx_oom_poll_fn(netdev);
1139
1140                 lio_delete_glists(lio);
1141
1142                 /* Delete the mbox for a PF with SRIOV disabled because sriov_info
1143                  * will now be changed.
1144                  */
1145                 if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled)
1146                         oct->fn_list.free_mbox(oct);
1147         }
1148
1149         for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
1150                 if (!(oct->io_qmask.oq & BIT_ULL(i)))
1151                         continue;
1152                 octeon_delete_droq(oct, i);
1153         }
1154
1155         for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1156                 if (!(oct->io_qmask.iq & BIT_ULL(i)))
1157                         continue;
1158                 octeon_delete_instr_queue(oct, i);
1159         }
1160
1161         if (queue_count_update) {
1162                 /* For PF re-configure sriov related information */
1163                 if ((OCTEON_CN23XX_PF(oct)) &&
1164                     !oct->sriov_info.sriov_enabled) {
1165                         oct->sriov_info.num_pf_rings = num_qs;
1166                         if (cn23xx_sriov_config(oct)) {
1167                                 dev_err(&oct->pci_dev->dev,
1168                                         "Queue reset aborted: SRIOV config failed\n");
1169                                 return -1;
1170                         }
1171
1172                         num_qs = oct->sriov_info.num_pf_rings;
1173                 }
1174         }
1175
1176         if (oct->fn_list.setup_device_regs(oct)) {
1177                 dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
1178                 return -1;
1179         }
1180
1181         /* The following are needed in case of queue count re-configuration and
1182          * not for descriptor count re-configuration.
1183          */
1184         if (queue_count_update) {
1185                 if (octeon_setup_instr_queues(oct))
1186                         return -1;
1187
1188                 if (octeon_setup_output_queues(oct))
1189                         return -1;
1190
1191                 /* Recreating mbox for PF that is SRIOV disabled */
1192                 if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
1193                         if (oct->fn_list.setup_mbox(oct)) {
1194                                 dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
1195                                 return -1;
1196                         }
1197                 }
1198
1199                 /* Deleting and recreating IRQs whether the interface is SRIOV
1200                  * enabled or disabled.
1201                  */
1202                 if (lio_irq_reallocate_irqs(oct, num_qs)) {
1203                         dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n");
1204                         return -1;
1205                 }
1206
1207                 /* Enable the input and output queues for this Octeon device */
1208                 if (oct->fn_list.enable_io_queues(oct)) {
1209                         dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n");
1210                         return -1;
1211                 }
1212
1213                 for (i = 0; i < oct->num_oqs; i++)
1214                         writel(oct->droq[i]->max_count,
1215                                oct->droq[i]->pkts_credit_reg);
1216
1217                 /* Inform firmware about the new queue count. This is required
1218                  * so firmware can allocate more queues than were configured at
1219                  * load time.
1220                  */
1221                 if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
1222                         if (lio_23xx_reconfigure_queue_count(lio))
1223                                 return -1;
1224                 }
1225         }
1226
1227         /* Once firmware is aware of the new value, queues can be recreated */
1228         if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
1229                 dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n");
1230                 return -1;
1231         }
1232
1233         if (queue_count_update) {
1234                 if (lio_setup_glists(oct, lio, num_qs)) {
1235                         dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n");
1236                         return -1;
1237                 }
1238
1239                 if (setup_rx_oom_poll_fn(netdev)) {
1240                         dev_err(&oct->pci_dev->dev, "lio_setup_rx_oom_poll_fn failed\n");
1241                         return 1;
1242                 }
1243
1244                 /* Send firmware the information about the new number of queues
1245                  * if the interface is a VF or a PF that is SRIOV enabled.
1246                  */
1247                 if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct))
1248                         if (lio_send_queue_count_update(netdev, num_qs))
1249                                 return -1;
1250         }
1251
1252         return 0;
1253 }
1254
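/* Change the rx/tx descriptor ring sizes (ethtool -G). Only supported on
 * CN23xx PF/VF; applies the new sizes by resetting the queues.
 */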
1255 static int lio_ethtool_set_ringparam(struct net_device *netdev,
1256                                      struct ethtool_ringparam *ering)
1257 {
1258         u32 rx_count, tx_count, rx_count_old, tx_count_old;
1259         struct lio *lio = GET_LIO(netdev);
1260         struct octeon_device *oct = lio->oct_dev;
1261         int stopped = 0;
1262
1263         if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct))
1264                 return -EINVAL;
1265
1266         if (ering->rx_mini_pending || ering->rx_jumbo_pending)
1267                 return -EINVAL;
1268
1269         rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS,
1270                            CN23XX_MAX_OQ_DESCRIPTORS);
1271         tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS,
1272                            CN23XX_MAX_IQ_DESCRIPTORS);
1273
1274         rx_count_old = oct->droq[0]->max_count;
1275         tx_count_old = oct->instr_queue[0]->max_count;
1276
1277         if (rx_count == rx_count_old && tx_count == tx_count_old)
1278                 return 0;
1279
1280         ifstate_set(lio, LIO_IFSTATE_RESETTING);
1281
1282         if (netif_running(netdev)) {
1283                 netdev->netdev_ops->ndo_stop(netdev);
1284                 stopped = 1;
1285         }
1286
1287         /* Change RX/TX DESCS  count */
1288         if (tx_count != tx_count_old)
1289                 CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1290                                             tx_count);
1291         if (rx_count != rx_count_old)
1292                 CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1293                                             rx_count);
1294
1295         if (lio_reset_queues(netdev, oct->num_iqs))
1296                 goto err_lio_reset_queues;
1297
1298         if (stopped)
1299                 netdev->netdev_ops->ndo_open(netdev);
1300
1301         ifstate_reset(lio, LIO_IFSTATE_RESETTING);
1302
1303         return 0;
1304
1305 err_lio_reset_queues:
1306         if (tx_count != tx_count_old)
1307                 CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1308                                             tx_count_old);
1309         if (rx_count != rx_count_old)
1310                 CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
1311                                             rx_count_old);
1312         return -EINVAL;
1313 }
1314
1315 static u32 lio_get_msglevel(struct net_device *netdev)
1316 {
1317         struct lio *lio = GET_LIO(netdev);
1318
1319         return lio->msg_enable;
1320 }
1321
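/* When NETIF_MSG_HW is toggled, also enable/disable verbose logging in the
 * firmware.
 */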
1322 static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
1323 {
1324         struct lio *lio = GET_LIO(netdev);
1325
1326         if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
1327                 if (msglvl & NETIF_MSG_HW)
1328                         liquidio_set_feature(netdev,
1329                                              OCTNET_CMD_VERBOSE_ENABLE, 0);
1330                 else
1331                         liquidio_set_feature(netdev,
1332                                              OCTNET_CMD_VERBOSE_DISABLE, 0);
1333         }
1334
1335         lio->msg_enable = msglvl;
1336 }
1337
1338 static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
1339 {
1340         struct lio *lio = GET_LIO(netdev);
1341
1342         lio->msg_enable = msglvl;
1343 }
1344
1345 static void
1346 lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
1347 {
1348         /* Note: these drivers do not support autonegotiation,
1349          * so just report pause frame support.
1350          */
1351         struct lio *lio = GET_LIO(netdev);
1352         struct octeon_device *oct = lio->oct_dev;
1353
1354         pause->autoneg = 0;
1355
1356         pause->tx_pause = oct->tx_pause;
1357         pause->rx_pause = oct->rx_pause;
1358 }
1359
1360 static int
1361 lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
1362 {
1363         /* Note: these drivers do not support
1364          * autonegotiation.
1365          */
1366         struct lio *lio = GET_LIO(netdev);
1367         struct octeon_device *oct = lio->oct_dev;
1368         struct octnic_ctrl_pkt nctrl;
1369         struct oct_link_info *linfo = &lio->linfo;
1370
1371         int ret = 0;
1372
1373         if (oct->chip_id != OCTEON_CN23XX_PF_VID)
1374                 return -EINVAL;
1375
1376         if (linfo->link.s.duplex == 0) {
1377                 /*no flow control for half duplex*/
1378                 if (pause->rx_pause || pause->tx_pause)
1379                         return -EINVAL;
1380         }
1381
1382         /*do not support autoneg of link flow control*/
1383         if (pause->autoneg == AUTONEG_ENABLE)
1384                 return -EINVAL;
1385
1386         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1387
1388         nctrl.ncmd.u64 = 0;
1389         nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
1390         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1391         nctrl.netpndev = (u64)netdev;
1392         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1393
1394         if (pause->rx_pause) {
1395                 /*enable rx pause*/
1396                 nctrl.ncmd.s.param1 = 1;
1397         } else {
1398                 /*disable rx pause*/
1399                 nctrl.ncmd.s.param1 = 0;
1400         }
1401
1402         if (pause->tx_pause) {
1403                 /*enable tx pause*/
1404                 nctrl.ncmd.s.param2 = 1;
1405         } else {
1406                 /*disable tx pause*/
1407                 nctrl.ncmd.s.param2 = 0;
1408         }
1409
1410         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1411         if (ret) {
1412                 dev_err(&oct->pci_dev->dev,
1413                         "Failed to set pause parameter, ret=%d\n", ret);
1414                 return -EINVAL;
1415         }
1416
1417         oct->rx_pause = pause->rx_pause;
1418         oct->tx_pause = pause->tx_pause;
1419
1420         return 0;
1421 }
1422
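/* ethtool get_ethtool_stats handler for the PF.  Values are written into
 * data[] in exactly the order (and count) reported by lio_get_strings() and
 * lio_get_sset_count(): netdev stats first, then firmware and MAC link
 * stats, then per-IQ and per-OQ counters for every active queue.
 */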
1423 static void
1424 lio_get_ethtool_stats(struct net_device *netdev,
1425                       struct ethtool_stats *stats  __attribute__((unused)),
1426                       u64 *data)
1427 {
1428         struct lio *lio = GET_LIO(netdev);
1429         struct octeon_device *oct_dev = lio->oct_dev;
1430         struct rtnl_link_stats64 lstats;
1431         int i = 0, j;
1432
1433         if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1434                 return;
1435
1436         netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
1437         /*sum of oct->droq[oq_no]->stats->rx_pkts_received */
1438         data[i++] = lstats.rx_packets;
1439         /*sum of oct->instr_queue[iq_no]->stats.tx_done */
1440         data[i++] = lstats.tx_packets;
1441         /*sum of oct->droq[oq_no]->stats->rx_bytes_received */
1442         data[i++] = lstats.rx_bytes;
1443         /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
1444         data[i++] = lstats.tx_bytes;
1445         data[i++] = lstats.rx_errors +
1446                         oct_dev->link_stats.fromwire.fcs_err +
1447                         oct_dev->link_stats.fromwire.jabber_err +
1448                         oct_dev->link_stats.fromwire.l2_err +
1449                         oct_dev->link_stats.fromwire.frame_err;
1450         data[i++] = lstats.tx_errors;
1451         /*sum of oct->droq[oq_no]->stats->rx_dropped +
1452          *oct->droq[oq_no]->stats->dropped_nodispatch +
1453          *oct->droq[oq_no]->stats->dropped_toomany +
1454          *oct->droq[oq_no]->stats->dropped_nomem
1455          */
1456         data[i++] = lstats.rx_dropped +
1457                         oct_dev->link_stats.fromwire.fifo_err +
1458                         oct_dev->link_stats.fromwire.dmac_drop +
1459                         oct_dev->link_stats.fromwire.red_drops +
1460                         oct_dev->link_stats.fromwire.fw_err_pko +
1461                         oct_dev->link_stats.fromwire.fw_err_link +
1462                         oct_dev->link_stats.fromwire.fw_err_drop;
1463         /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
1464         data[i++] = lstats.tx_dropped +
1465                         oct_dev->link_stats.fromhost.max_collision_fail +
1466                         oct_dev->link_stats.fromhost.max_deferral_fail +
1467                         oct_dev->link_stats.fromhost.total_collisions +
1468                         oct_dev->link_stats.fromhost.fw_err_pko +
1469                         oct_dev->link_stats.fromhost.fw_err_link +
1470                         oct_dev->link_stats.fromhost.fw_err_drop +
1471                         oct_dev->link_stats.fromhost.fw_err_pki;
1472
1473         /* firmware tx stats */
1474         /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
1475          *fromhost.fw_total_sent
1476          */
1477         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
1478         /*per_core_stats[i].link_stats[port].fromhost.fw_total_fwd */
1479         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
1480         /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
1481         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
1482         /*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */
1483         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki);
1484         /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
1485         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
1486         /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1487          *fw_err_drop
1488          */
1489         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);
1490
1491         /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
1492         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
1493         /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1494          *fw_tso_fwd
1495          */
1496         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
1497         /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1498          *fw_err_tso
1499          */
1500         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
1501         /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
1502          *fw_tx_vxlan
1503          */
1504         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
1505
1506         /* Multicast packets sent by this port */
1507         data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
1508         data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;
1509
1510         /* mac tx statistics */
1511         /*CVMX_BGXX_CMRX_TX_STAT5 */
1512         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
1513         /*CVMX_BGXX_CMRX_TX_STAT4 */
1514         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
1515         /*CVMX_BGXX_CMRX_TX_STAT15 */
1516         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
1517         /*CVMX_BGXX_CMRX_TX_STAT14 */
1518         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
1519         /*CVMX_BGXX_CMRX_TX_STAT17 */
1520         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
1521         /*CVMX_BGXX_CMRX_TX_STAT0 */
1522         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
1523         /*CVMX_BGXX_CMRX_TX_STAT3 */
1524         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
1525         /*CVMX_BGXX_CMRX_TX_STAT2 */
1526         data[i++] =
1527                 CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
1528         /*CVMX_BGXX_CMRX_TX_STAT0 */
1529         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
1530         /*CVMX_BGXX_CMRX_TX_STAT1 */
1531         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
1532         /*CVMX_BGXX_CMRX_TX_STAT16 */
1533         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
1534         /*CVMX_BGXX_CMRX_TX_STAT6 */
1535         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);
1536
1537         /* RX firmware stats */
1538         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1539          *fw_total_rcvd
1540          */
1541         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
1542         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1543          *fw_total_fwd
1544          */
1545         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
1546         /* Multicast packets received on this port */
1547         data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
1548         data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
1549         /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
1550         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
1551         /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
1552         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
1553         /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
1554         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
1555         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1556          *fw_err_pko
1557          */
1558         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
1559         /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
1560         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
1561         /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
1562          *fromwire.fw_err_drop
1563          */
1564         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
1565
1566         /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
1567          *fromwire.fw_rx_vxlan
1568          */
1569         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
1570         /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
1571          *fromwire.fw_rx_vxlan_err
1572          */
1573         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);
1574
1575         /* LRO */
1576         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1577          *fw_lro_pkts
1578          */
1579         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
1580         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1581          *fw_lro_octs
1582          */
1583         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
1584         /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
1585         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
1586         /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
1587         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
1588         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1589          *fw_lro_aborts_port
1590          */
1591         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
1592         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1593          *fw_lro_aborts_seq
1594          */
1595         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
1596         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1597          *fw_lro_aborts_tsval
1598          */
1599         data[i++] =
1600                 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
1601         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
1602          *fw_lro_aborts_timer
1603          */
1604         data[i++] =
1605                 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
1606         /* intrmod: packet forward rate */
1607         /*per_core_stats[j].link_stats[i].fromwire.fwd_rate */
1608         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);
1609
1610         /* mac: link-level stats */
1611         /*CVMX_BGXX_CMRX_RX_STAT0 */
1612         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
1613         /*CVMX_BGXX_CMRX_RX_STAT1 */
1614         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
1615         /*CVMX_PKI_STATX_STAT5 */
1616         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
1617         /*CVMX_PKI_STATX_STAT5 */
1618         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
1619         /*wqe->word2.err_code or wqe->word2.err_level */
1620         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
1621         /*CVMX_BGXX_CMRX_RX_STAT2 */
1622         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
1623         /*CVMX_BGXX_CMRX_RX_STAT6 */
1624         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
1625         /*CVMX_BGXX_CMRX_RX_STAT4 */
1626         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
1627         /*wqe->word2.err_code or wqe->word2.err_level */
1628         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
1629         /*lio->link_changes*/
1630         data[i++] = CVM_CAST64(lio->link_changes);
1631
1632         for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
1633                 if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
1634                         continue;
1635                 /*packets to network port*/
1636                 /*# of packets tx to network */
1637                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
1638                 /*# of bytes tx to network */
1639                 data[i++] =
1640                         CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
1641                 /*# of packets dropped */
1642                 data[i++] =
1643                         CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
1644                 /*# of tx fails due to queue full */
1645                 data[i++] =
1646                         CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
1647                 /*XXX gather entries sent */
1648                 data[i++] =
1649                         CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
1650
1651                 /*instruction to firmware: data and control */
1652                 /*# of instructions to the queue */
1653                 data[i++] =
1654                         CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
1655                 /*# of instructions processed */
1656                 data[i++] = CVM_CAST64(
1657                                 oct_dev->instr_queue[j]->stats.instr_processed);
1658                 /*# of instructions that could not be processed */
1659                 data[i++] = CVM_CAST64(
1660                                 oct_dev->instr_queue[j]->stats.instr_dropped);
1661                 /*bytes sent through the queue */
1662                 data[i++] =
1663                         CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
1664
1665                 /*tso request*/
1666                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
1667                 /*vxlan request*/
1668                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
1669                 /*txq restart*/
1670                 data[i++] =
1671                         CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
1672         }
1673
1674         /* RX */
1675         for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
1676                 if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
1677                         continue;
1678
1679                 /*packets sent to TCP/IP network stack */
1680                 /*# of packets to network stack */
1681                 data[i++] =
1682                         CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
1683                 /*# of bytes to network stack */
1684                 data[i++] =
1685                         CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
1686                 /*# of packets dropped */
1687                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
1688                                        oct_dev->droq[j]->stats.dropped_toomany +
1689                                        oct_dev->droq[j]->stats.rx_dropped);
1690                 data[i++] =
1691                         CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
1692                 data[i++] =
1693                         CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
1694                 data[i++] =
1695                         CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
1696
1697                 /*control and data path*/
1698                 data[i++] =
1699                         CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
1700                 data[i++] =
1701                         CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
1702                 data[i++] =
1703                         CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
1704
1705                 data[i++] =
1706                         CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
1707                 data[i++] =
1708                         CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
1709         }
1710 }
1711
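/* VF counterpart of lio_get_ethtool_stats(); the ordering must match
 * oct_vf_stats_strings plus the per-queue string sets.
 */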
1712 static void lio_vf_get_ethtool_stats(struct net_device *netdev,
1713                                      struct ethtool_stats *stats
1714                                      __attribute__((unused)),
1715                                      u64 *data)
1716 {
1717         struct rtnl_link_stats64 lstats;
1718         struct lio *lio = GET_LIO(netdev);
1719         struct octeon_device *oct_dev = lio->oct_dev;
1720         int i = 0, j, vj;
1721
1722         if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1723                 return;
1724
1725         netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
1726         /* sum of oct->droq[oq_no]->stats->rx_pkts_received */
1727         data[i++] = lstats.rx_packets;
1728         /* sum of oct->instr_queue[iq_no]->stats.tx_done */
1729         data[i++] = lstats.tx_packets;
1730         /* sum of oct->droq[oq_no]->stats->rx_bytes_received */
1731         data[i++] = lstats.rx_bytes;
1732         /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
1733         data[i++] = lstats.tx_bytes;
1734         data[i++] = lstats.rx_errors;
1735         data[i++] = lstats.tx_errors;
1736          /* sum of oct->droq[oq_no]->stats->rx_dropped +
1737           * oct->droq[oq_no]->stats->dropped_nodispatch +
1738           * oct->droq[oq_no]->stats->dropped_toomany +
1739           * oct->droq[oq_no]->stats->dropped_nomem
1740           */
1741         data[i++] = lstats.rx_dropped;
1742         /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
1743         data[i++] = lstats.tx_dropped +
1744                 oct_dev->link_stats.fromhost.fw_err_drop;
1745
1746         data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
1747         data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
1748         data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
1749         data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;
1750
1751         /* lio->link_changes */
1752         data[i++] = CVM_CAST64(lio->link_changes);
1753
1754         for (vj = 0; vj < oct_dev->num_iqs; vj++) {
1755                 j = lio->linfo.txpciq[vj].s.q_no;
1756
1757                 /* packets to network port */
1758                 /* # of packets tx to network */
1759                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
1760                  /* # of bytes tx to network */
1761                 data[i++] = CVM_CAST64(
1762                                 oct_dev->instr_queue[j]->stats.tx_tot_bytes);
1763                 /* # of packets dropped */
1764                 data[i++] = CVM_CAST64(
1765                                 oct_dev->instr_queue[j]->stats.tx_dropped);
1766                 /* # of tx fails due to queue full */
1767                 data[i++] = CVM_CAST64(
1768                                 oct_dev->instr_queue[j]->stats.tx_iq_busy);
1769                 /* XXX gather entries sent */
1770                 data[i++] = CVM_CAST64(
1771                                 oct_dev->instr_queue[j]->stats.sgentry_sent);
1772
1773                 /* instruction to firmware: data and control */
1774                 /* # of instructions to the queue */
1775                 data[i++] = CVM_CAST64(
1776                                 oct_dev->instr_queue[j]->stats.instr_posted);
1777                 /* # of instructions processed */
1778                 data[i++] =
1779                     CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
1780                 /* # of instructions that could not be processed */
1781                 data[i++] =
1782                     CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
1783                 /* bytes sent through the queue */
1784                 data[i++] = CVM_CAST64(
1785                                 oct_dev->instr_queue[j]->stats.bytes_sent);
1786                 /* tso request */
1787                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
1788                 /* vxlan request */
1789                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
1790                 /* txq restart */
1791                 data[i++] = CVM_CAST64(
1792                                 oct_dev->instr_queue[j]->stats.tx_restart);
1793         }
1794
1795         /* RX */
1796         for (vj = 0; vj < oct_dev->num_oqs; vj++) {
1797                 j = lio->linfo.rxpciq[vj].s.q_no;
1798
1799                 /* packets sent to TCP/IP network stack */
1800                 /* # of packets to network stack */
1801                 data[i++] = CVM_CAST64(
1802                                 oct_dev->droq[j]->stats.rx_pkts_received);
1803                 /* # of bytes to network stack */
1804                 data[i++] = CVM_CAST64(
1805                                 oct_dev->droq[j]->stats.rx_bytes_received);
1806                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
1807                                        oct_dev->droq[j]->stats.dropped_toomany +
1808                                        oct_dev->droq[j]->stats.rx_dropped);
1809                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
1810                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
1811                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
1812
1813                 /* control and data path */
1814                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
1815                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
1816                 data[i++] =
1817                         CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
1818
1819                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
1820                 data[i++] =
1821                     CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
1822         }
1823 }
1824
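/* Emit the ETH_SS_PRIV_FLAGS strings; only CN23XX PF/VF devices expose
 * private flags.
 */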
1825 static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
1826 {
1827         struct octeon_device *oct_dev = lio->oct_dev;
1828         int i;
1829
1830         switch (oct_dev->chip_id) {
1831         case OCTEON_CN23XX_PF_VID:
1832         case OCTEON_CN23XX_VF_VID:
1833                 for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
1834                         sprintf(data, "%s", oct_priv_flags_strings[i]);
1835                         data += ETH_GSTRING_LEN;
1836                 }
1837                 break;
1838         case OCTEON_CN68XX:
1839         case OCTEON_CN66XX:
1840                 break;
1841         default:
1842                 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1843                 break;
1844         }
1845 }
1846
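/* ethtool get_strings handler for the PF: base stat names followed by
 * "tx-<q>-..." and "rx-<q>-..." names for every active IQ and OQ.
 */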
1847 static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1848 {
1849         struct lio *lio = GET_LIO(netdev);
1850         struct octeon_device *oct_dev = lio->oct_dev;
1851         int num_iq_stats, num_oq_stats, i, j;
1852         int num_stats;
1853
1854         switch (stringset) {
1855         case ETH_SS_STATS:
1856                 num_stats = ARRAY_SIZE(oct_stats_strings);
1857                 for (j = 0; j < num_stats; j++) {
1858                         sprintf(data, "%s", oct_stats_strings[j]);
1859                         data += ETH_GSTRING_LEN;
1860                 }
1861
1862                 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1863                 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1864                         if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1865                                 continue;
1866                         for (j = 0; j < num_iq_stats; j++) {
1867                                 sprintf(data, "tx-%d-%s", i,
1868                                         oct_iq_stats_strings[j]);
1869                                 data += ETH_GSTRING_LEN;
1870                         }
1871                 }
1872
1873                 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1874                 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1875                         if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1876                                 continue;
1877                         for (j = 0; j < num_oq_stats; j++) {
1878                                 sprintf(data, "rx-%d-%s", i,
1879                                         oct_droq_stats_strings[j]);
1880                                 data += ETH_GSTRING_LEN;
1881                         }
1882                 }
1883                 break;
1884
1885         case ETH_SS_PRIV_FLAGS:
1886                 lio_get_priv_flags_strings(lio, data);
1887                 break;
1888         default:
1889                 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1890                 break;
1891         }
1892 }
1893
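/* VF counterpart of lio_get_strings(), using oct_vf_stats_strings for the
 * base set.
 */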
1894 static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
1895                                u8 *data)
1896 {
1897         int num_iq_stats, num_oq_stats, i, j;
1898         struct lio *lio = GET_LIO(netdev);
1899         struct octeon_device *oct_dev = lio->oct_dev;
1900         int num_stats;
1901
1902         switch (stringset) {
1903         case ETH_SS_STATS:
1904                 num_stats = ARRAY_SIZE(oct_vf_stats_strings);
1905                 for (j = 0; j < num_stats; j++) {
1906                         sprintf(data, "%s", oct_vf_stats_strings[j]);
1907                         data += ETH_GSTRING_LEN;
1908                 }
1909
1910                 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1911                 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1912                         if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1913                                 continue;
1914                         for (j = 0; j < num_iq_stats; j++) {
1915                                 sprintf(data, "tx-%d-%s", i,
1916                                         oct_iq_stats_strings[j]);
1917                                 data += ETH_GSTRING_LEN;
1918                         }
1919                 }
1920
1921                 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1922                 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1923                         if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1924                                 continue;
1925                         for (j = 0; j < num_oq_stats; j++) {
1926                                 sprintf(data, "rx-%d-%s", i,
1927                                         oct_droq_stats_strings[j]);
1928                                 data += ETH_GSTRING_LEN;
1929                         }
1930                 }
1931                 break;
1932
1933         case ETH_SS_PRIV_FLAGS:
1934                 lio_get_priv_flags_strings(lio, data);
1935                 break;
1936         default:
1937                 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1938                 break;
1939         }
1940 }
1941
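/* Number of private flags for CN23XX PF/VF, -EOPNOTSUPP otherwise. */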
1942 static int lio_get_priv_flags_ss_count(struct lio *lio)
1943 {
1944         struct octeon_device *oct_dev = lio->oct_dev;
1945
1946         switch (oct_dev->chip_id) {
1947         case OCTEON_CN23XX_PF_VID:
1948         case OCTEON_CN23XX_VF_VID:
1949                 return ARRAY_SIZE(oct_priv_flags_strings);
1950         case OCTEON_CN68XX:
1951         case OCTEON_CN66XX:
1952                 return -EOPNOTSUPP;
1953         default:
1954                 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1955                 return -EOPNOTSUPP;
1956         }
1957 }
1958
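/* ethtool get_sset_count handler: the stats count is the base set plus one
 * per-IQ set and one per-OQ set for each active queue.
 */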
1959 static int lio_get_sset_count(struct net_device *netdev, int sset)
1960 {
1961         struct lio *lio = GET_LIO(netdev);
1962         struct octeon_device *oct_dev = lio->oct_dev;
1963
1964         switch (sset) {
1965         case ETH_SS_STATS:
1966                 return (ARRAY_SIZE(oct_stats_strings) +
1967                         ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1968                         ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1969         case ETH_SS_PRIV_FLAGS:
1970                 return lio_get_priv_flags_ss_count(lio);
1971         default:
1972                 return -EOPNOTSUPP;
1973         }
1974 }
1975
1976 static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
1977 {
1978         struct lio *lio = GET_LIO(netdev);
1979         struct octeon_device *oct_dev = lio->oct_dev;
1980
1981         switch (sset) {
1982         case ETH_SS_STATS:
1983                 return (ARRAY_SIZE(oct_vf_stats_strings) +
1984                         ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1985                         ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1986         case ETH_SS_PRIV_FLAGS:
1987                 return lio_get_priv_flags_ss_count(lio);
1988         default:
1989                 return -EOPNOTSUPP;
1990         }
1991 }
1992
1993 /*  get interrupt moderation parameters */
1994 static int octnet_get_intrmod_cfg(struct lio *lio,
1995                                   struct oct_intrmod_cfg *intr_cfg)
1996 {
1997         struct octeon_soft_command *sc;
1998         struct oct_intrmod_resp *resp;
1999         int retval;
2000         struct octeon_device *oct_dev = lio->oct_dev;
2001
2002         /* Alloc soft command */
2003         sc = (struct octeon_soft_command *)
2004                 octeon_alloc_soft_command(oct_dev,
2005                                           0,
2006                                           sizeof(struct oct_intrmod_resp), 0);
2007
2008         if (!sc)
2009                 return -ENOMEM;
2010
2011         resp = (struct oct_intrmod_resp *)sc->virtrptr;
2012         memset(resp, 0, sizeof(struct oct_intrmod_resp));
2013
2014         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2015
2016         octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
2017                                     OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);
2018
2019         init_completion(&sc->complete);
2020         sc->sc_status = OCTEON_REQUEST_PENDING;
2021
2022         retval = octeon_send_soft_command(oct_dev, sc);
2023         if (retval == IQ_SEND_FAILED) {
2024                 octeon_free_soft_command(oct_dev, sc);
2025                 return -EINVAL;
2026         }
2027
2028         /* Sleep on a wait queue until the condition flag indicates that the
2029          * response arrived or the request timed out.
2030          */
2031         retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
2032         if (retval)
2033                 return -ENODEV;
2034
2035         if (resp->status) {
2036                 dev_err(&oct_dev->pci_dev->dev,
2037                         "Get interrupt moderation parameters failed\n");
2038                 WRITE_ONCE(sc->caller_is_done, true);
2039                 return -ENODEV;
2040         }
2041
2042         octeon_swap_8B_data((u64 *)&resp->intrmod,
2043                             (sizeof(struct oct_intrmod_cfg)) / 8);
2044         memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
2045         WRITE_ONCE(sc->caller_is_done, true);
2046
2047         return 0;
2048 }
2049
2050 /*  Configure interrupt moderation parameters */
2051 static int octnet_set_intrmod_cfg(struct lio *lio,
2052                                   struct oct_intrmod_cfg *intr_cfg)
2053 {
2054         struct octeon_soft_command *sc;
2055         struct oct_intrmod_cfg *cfg;
2056         int retval;
2057         struct octeon_device *oct_dev = lio->oct_dev;
2058
2059         /* Alloc soft command */
2060         sc = (struct octeon_soft_command *)
2061                 octeon_alloc_soft_command(oct_dev,
2062                                           sizeof(struct oct_intrmod_cfg),
2063                                           16, 0);
2064
2065         if (!sc)
2066                 return -ENOMEM;
2067
2068         cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
2069
2070         memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
2071         octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
2072
2073         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
2074
2075         octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
2076                                     OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
2077
2078         init_completion(&sc->complete);
2079         sc->sc_status = OCTEON_REQUEST_PENDING;
2080
2081         retval = octeon_send_soft_command(oct_dev, sc);
2082         if (retval == IQ_SEND_FAILED) {
2083                 octeon_free_soft_command(oct_dev, sc);
2084                 return -EINVAL;
2085         }
2086
2087         /* Sleep on a wait queue until the condition flag indicates that the
2088          * response arrived or the request timed out.
2089          */
2090         retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
2091         if (retval)
2092                 return retval;
2093
2094         retval = sc->sc_status;
2095         if (retval == 0) {
2096                 dev_info(&oct_dev->pci_dev->dev,
2097                          "Rx-Adaptive Interrupt moderation %s\n",
2098                          (intr_cfg->rx_enable) ?
2099                          "enabled" : "disabled");
2100                 WRITE_ONCE(sc->caller_is_done, true);
2101                 return 0;
2102         }
2103
2104         dev_err(&oct_dev->pci_dev->dev,
2105                 "intrmod config failed. Status: %x\n", retval);
2106         WRITE_ONCE(sc->caller_is_done, true);
2107         return -ENODEV;
2108 }
2109
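/* ethtool get_coalesce handler.  The current interrupt-moderation state is
 * read back from firmware; fixed usecs/frames thresholds are reported only
 * while the corresponding adaptive (rx/tx) moderation is disabled.  The
 * kernel_coal and extack arguments come with the extended coalesce uAPI and
 * are unused here.
 */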
2110 static int lio_get_intr_coalesce(struct net_device *netdev,
2111                                  struct ethtool_coalesce *intr_coal,
2112                                  struct kernel_ethtool_coalesce *kernel_coal,
2113                                  struct netlink_ext_ack *extack)
2114 {
2115         struct lio *lio = GET_LIO(netdev);
2116         struct octeon_device *oct = lio->oct_dev;
2117         struct octeon_instr_queue *iq;
2118         struct oct_intrmod_cfg intrmod_cfg;
2119
2120         if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
2121                 return -ENODEV;
2122
2123         switch (oct->chip_id) {
2124         case OCTEON_CN23XX_PF_VID:
2125         case OCTEON_CN23XX_VF_VID: {
2126                 if (!intrmod_cfg.rx_enable) {
2127                         intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
2128                         intr_coal->rx_max_coalesced_frames =
2129                                 oct->rx_max_coalesced_frames;
2130                 }
2131                 if (!intrmod_cfg.tx_enable)
2132                         intr_coal->tx_max_coalesced_frames =
2133                                 oct->tx_max_coalesced_frames;
2134                 break;
2135         }
2136         case OCTEON_CN68XX:
2137         case OCTEON_CN66XX: {
2138                 struct octeon_cn6xxx *cn6xxx =
2139                         (struct octeon_cn6xxx *)oct->chip;
2140
2141                 if (!intrmod_cfg.rx_enable) {
2142                         intr_coal->rx_coalesce_usecs =
2143                                 CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
2144                         intr_coal->rx_max_coalesced_frames =
2145                                 CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
2146                 }
2147                 iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
2148                 intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
2149                 break;
2150         }
2151         default:
2152                 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
2153                 return -EINVAL;
2154         }
2155         if (intrmod_cfg.rx_enable) {
2156                 intr_coal->use_adaptive_rx_coalesce =
2157                         intrmod_cfg.rx_enable;
2158                 intr_coal->rate_sample_interval =
2159                         intrmod_cfg.check_intrvl;
2160                 intr_coal->pkt_rate_high =
2161                         intrmod_cfg.maxpkt_ratethr;
2162                 intr_coal->pkt_rate_low =
2163                         intrmod_cfg.minpkt_ratethr;
2164                 intr_coal->rx_max_coalesced_frames_high =
2165                         intrmod_cfg.rx_maxcnt_trigger;
2166                 intr_coal->rx_coalesce_usecs_high =
2167                         intrmod_cfg.rx_maxtmr_trigger;
2168                 intr_coal->rx_coalesce_usecs_low =
2169                         intrmod_cfg.rx_mintmr_trigger;
2170                 intr_coal->rx_max_coalesced_frames_low =
2171                         intrmod_cfg.rx_mincnt_trigger;
2172         }
2173         if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
2174             (intrmod_cfg.tx_enable)) {
2175                 intr_coal->use_adaptive_tx_coalesce =
2176                         intrmod_cfg.tx_enable;
2177                 intr_coal->tx_max_coalesced_frames_high =
2178                         intrmod_cfg.tx_maxcnt_trigger;
2179                 intr_coal->tx_max_coalesced_frames_low =
2180                         intrmod_cfg.tx_mincnt_trigger;
2181         }
2182         return 0;
2183 }
2184
2185 /* Enable/Disable auto interrupt Moderation */
2186 static int oct_cfg_adaptive_intr(struct lio *lio,
2187                                  struct oct_intrmod_cfg *intrmod_cfg,
2188                                  struct ethtool_coalesce *intr_coal)
2189 {
2190         int ret = 0;
2191
2192         if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
2193                 intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
2194                 intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
2195                 intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
2196         }
2197         if (intrmod_cfg->rx_enable) {
2198                 intrmod_cfg->rx_maxcnt_trigger =
2199                         intr_coal->rx_max_coalesced_frames_high;
2200                 intrmod_cfg->rx_maxtmr_trigger =
2201                         intr_coal->rx_coalesce_usecs_high;
2202                 intrmod_cfg->rx_mintmr_trigger =
2203                         intr_coal->rx_coalesce_usecs_low;
2204                 intrmod_cfg->rx_mincnt_trigger =
2205                         intr_coal->rx_max_coalesced_frames_low;
2206         }
2207         if (intrmod_cfg->tx_enable) {
2208                 intrmod_cfg->tx_maxcnt_trigger =
2209                         intr_coal->tx_max_coalesced_frames_high;
2210                 intrmod_cfg->tx_mincnt_trigger =
2211                         intr_coal->tx_max_coalesced_frames_low;
2212         }
2213
2214         ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
2215
2216         return ret;
2217 }
2218
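/* Program the packet-count based RX interrupt threshold.  For CN23XX the
 * count occupies the low bits of SLI_OQ_PKT_INT_LEVELS, so each register is
 * read back first to preserve the time-threshold bits before the new count
 * (rx_max_coalesced_frames - 1) is written.
 */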
2219 static int
2220 oct_cfg_rx_intrcnt(struct lio *lio,
2221                    struct oct_intrmod_cfg *intrmod,
2222                    struct ethtool_coalesce *intr_coal)
2223 {
2224         struct octeon_device *oct = lio->oct_dev;
2225         u32 rx_max_coalesced_frames;
2226
2227         /* Config Cnt based interrupt values */
2228         switch (oct->chip_id) {
2229         case OCTEON_CN68XX:
2230         case OCTEON_CN66XX: {
2231                 struct octeon_cn6xxx *cn6xxx =
2232                         (struct octeon_cn6xxx *)oct->chip;
2233
2234                 if (!intr_coal->rx_max_coalesced_frames)
2235                         rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
2236                 else
2237                         rx_max_coalesced_frames =
2238                                 intr_coal->rx_max_coalesced_frames;
2239                 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
2240                                  rx_max_coalesced_frames);
2241                 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
2242                 break;
2243         }
2244         case OCTEON_CN23XX_PF_VID: {
2245                 int q_no;
2246
2247                 if (!intr_coal->rx_max_coalesced_frames)
2248                         rx_max_coalesced_frames = intrmod->rx_frames;
2249                 else
2250                         rx_max_coalesced_frames =
2251                             intr_coal->rx_max_coalesced_frames;
2252                 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2253                         q_no += oct->sriov_info.pf_srn;
2254                         octeon_write_csr64(
2255                             oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
2256                             (octeon_read_csr64(
2257                                  oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
2258                              (0x3fffff00000000UL)) |
2259                                 (rx_max_coalesced_frames - 1));
2260                         /*consider setting resend bit*/
2261                 }
2262                 intrmod->rx_frames = rx_max_coalesced_frames;
2263                 oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
2264                 break;
2265         }
2266         case OCTEON_CN23XX_VF_VID: {
2267                 int q_no;
2268
2269                 if (!intr_coal->rx_max_coalesced_frames)
2270                         rx_max_coalesced_frames = intrmod->rx_frames;
2271                 else
2272                         rx_max_coalesced_frames =
2273                             intr_coal->rx_max_coalesced_frames;
2274                 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2275                         octeon_write_csr64(
2276                             oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
2277                             (octeon_read_csr64(
2278                                  oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
2279                              (0x3fffff00000000UL)) |
2280                                 (rx_max_coalesced_frames - 1));
2281                         /*consider writing to resend bit here*/
2282                 }
2283                 intrmod->rx_frames = rx_max_coalesced_frames;
2284                 oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
2285                 break;
2286         }
2287         default:
2288                 return -EINVAL;
2289         }
2290         return 0;
2291 }
2292
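/* Program the time based RX interrupt threshold.  The requested
 * rx_coalesce_usecs value is converted to OQ ticks and, on CN23XX, written
 * into the upper 32 bits of SLI_OQ_PKT_INT_LEVELS alongside the current
 * frame count.
 */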
2293 static int oct_cfg_rx_intrtime(struct lio *lio,
2294                                struct oct_intrmod_cfg *intrmod,
2295                                struct ethtool_coalesce *intr_coal)
2296 {
2297         struct octeon_device *oct = lio->oct_dev;
2298         u32 time_threshold, rx_coalesce_usecs;
2299
2300         /* Config Time based interrupt values */
2301         switch (oct->chip_id) {
2302         case OCTEON_CN68XX:
2303         case OCTEON_CN66XX: {
2304                 struct octeon_cn6xxx *cn6xxx =
2305                         (struct octeon_cn6xxx *)oct->chip;
2306                 if (!intr_coal->rx_coalesce_usecs)
2307                         rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
2308                 else
2309                         rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2310
2311                 time_threshold = lio_cn6xxx_get_oq_ticks(oct,
2312                                                          rx_coalesce_usecs);
2313                 octeon_write_csr(oct,
2314                                  CN6XXX_SLI_OQ_INT_LEVEL_TIME,
2315                                  time_threshold);
2316
2317                 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
2318                 break;
2319         }
2320         case OCTEON_CN23XX_PF_VID: {
2321                 u64 time_threshold;
2322                 int q_no;
2323
2324                 if (!intr_coal->rx_coalesce_usecs)
2325                         rx_coalesce_usecs = intrmod->rx_usecs;
2326                 else
2327                         rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2328                 time_threshold =
2329                     cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
2330                 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2331                         q_no += oct->sriov_info.pf_srn;
2332                         octeon_write_csr64(oct,
2333                                            CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
2334                                            (intrmod->rx_frames |
2335                                             ((u64)time_threshold << 32)));
2336                         /*consider writing to resend bit here*/
2337                 }
2338                 intrmod->rx_usecs = rx_coalesce_usecs;
2339                 oct->rx_coalesce_usecs = rx_coalesce_usecs;
2340                 break;
2341         }
2342         case OCTEON_CN23XX_VF_VID: {
2343                 u64 time_threshold;
2344                 int q_no;
2345
2346                 if (!intr_coal->rx_coalesce_usecs)
2347                         rx_coalesce_usecs = intrmod->rx_usecs;
2348                 else
2349                         rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2350
2351                 time_threshold =
2352                     cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
2353                 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2354                         octeon_write_csr64(
2355                                 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
2356                                 (intrmod->rx_frames |
2357                                  ((u64)time_threshold << 32)));
2358                         /*consider setting resend bit*/
2359                 }
2360                 intrmod->rx_usecs = rx_coalesce_usecs;
2361                 oct->rx_coalesce_usecs = rx_coalesce_usecs;
2362                 break;
2363         }
2364         default:
2365                 return -EINVAL;
2366         }
2367
2368         return 0;
2369 }
2370
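/* Program the packet-count based TX (IQ) interrupt threshold.  For CN23XX
 * the watermark lives in each queue's inst_cnt register, so the old
 * watermark and count fields are cleared and only the new watermark is
 * written back.
 */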
2371 static int
2372 oct_cfg_tx_intrcnt(struct lio *lio,
2373                    struct oct_intrmod_cfg *intrmod,
2374                    struct ethtool_coalesce *intr_coal)
2375 {
2376         struct octeon_device *oct = lio->oct_dev;
2377         u32 iq_intr_pkt;
2378         void __iomem *inst_cnt_reg;
2379         u64 val;
2380
2381         /* Config Cnt based interrupt values */
2382         switch (oct->chip_id) {
2383         case OCTEON_CN68XX:
2384         case OCTEON_CN66XX:
2385                 break;
2386         case OCTEON_CN23XX_VF_VID:
2387         case OCTEON_CN23XX_PF_VID: {
2388                 int q_no;
2389
2390                 if (!intr_coal->tx_max_coalesced_frames)
2391                         iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
2392                                       CN23XX_PKT_IN_DONE_WMARK_MASK;
2393                 else
2394                         iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
2395                                       CN23XX_PKT_IN_DONE_WMARK_MASK;
2396                 for (q_no = 0; q_no < oct->num_iqs; q_no++) {
2397                         inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
2398                         val = readq(inst_cnt_reg);
2399                         /*clear wmark and count.dont want to write count back*/
2400                         val = (val & 0xFFFF000000000000ULL) |
2401                               ((u64)(iq_intr_pkt - 1)
2402                                << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
2403                         writeq(val, inst_cnt_reg);
2404                         /*consider setting resend bit*/
2405                 }
2406                 intrmod->tx_frames = iq_intr_pkt;
2407                 oct->tx_max_coalesced_frames = iq_intr_pkt;
2408                 break;
2409         }
2410         default:
2411                 return -EINVAL;
2412         }
2413         return 0;
2414 }
2415
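/* ethtool set_coalesce handler.  On CN6XXX, tx-frames programs the IQ fill
 * threshold directly.  The intrmod config is then seeded from the static
 * defaults, adaptive moderation is pushed to firmware, and the fixed RX
 * time/count and TX count thresholds are applied only when the matching
 * adaptive mode is off (e.g. "ethtool -C ethX adaptive-rx off rx-usecs 64
 * rx-frames 64").
 */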
2416 static int lio_set_intr_coalesce(struct net_device *netdev,
2417                                  struct ethtool_coalesce *intr_coal,
2418                                  struct kernel_ethtool_coalesce *kernel_coal,
2419                                  struct netlink_ext_ack *extack)
2420 {
2421         struct lio *lio = GET_LIO(netdev);
2422         int ret;
2423         struct octeon_device *oct = lio->oct_dev;
2424         struct oct_intrmod_cfg intrmod = {0};
2425         u32 j, q_no;
2426         int db_max, db_min;
2427
2428         switch (oct->chip_id) {
2429         case OCTEON_CN68XX:
2430         case OCTEON_CN66XX:
2431                 db_min = CN6XXX_DB_MIN;
2432                 db_max = CN6XXX_DB_MAX;
2433                 if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
2434                     (intr_coal->tx_max_coalesced_frames <= db_max)) {
2435                         for (j = 0; j < lio->linfo.num_txpciq; j++) {
2436                                 q_no = lio->linfo.txpciq[j].s.q_no;
2437                                 oct->instr_queue[q_no]->fill_threshold =
2438                                         intr_coal->tx_max_coalesced_frames;
2439                         }
2440                 } else {
2441                         dev_err(&oct->pci_dev->dev,
2442                                 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
2443                                 intr_coal->tx_max_coalesced_frames,
2444                                 db_min, db_max);
2445                         return -EINVAL;
2446                 }
2447                 break;
2448         case OCTEON_CN23XX_PF_VID:
2449         case OCTEON_CN23XX_VF_VID:
2450                 break;
2451         default:
2452                 return -EINVAL;
2453         }
2454
2455         intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
2456         intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
2457         intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
2458         intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
2459         intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
2460
2461         ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);
2462
2463         if (!intr_coal->use_adaptive_rx_coalesce) {
2464                 ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
2465                 if (ret)
2466                         goto ret_intrmod;
2467
2468                 ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
2469                 if (ret)
2470                         goto ret_intrmod;
2471         } else {
2472                 oct->rx_coalesce_usecs =
2473                         CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
2474                 oct->rx_max_coalesced_frames =
2475                         CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
2476         }
2477
2478         if (!intr_coal->use_adaptive_tx_coalesce) {
2479                 ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
2480                 if (ret)
2481                         goto ret_intrmod;
2482         } else {
2483                 oct->tx_max_coalesced_frames =
2484                         CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
2485         }
2486
2487         return 0;
2488 ret_intrmod:
2489         return ret;
2490 }
2491
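/* ethtool get_ts_info handler: report software timestamping support, the
 * PTP hardware clock index (when a PHC is registered) and, if built with
 * PTP_HARDWARE_TIMESTAMPING, the supported hardware tx types and rx
 * filters.
 */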
2492 static int lio_get_ts_info(struct net_device *netdev,
2493                            struct ethtool_ts_info *info)
2494 {
2495         struct lio *lio = GET_LIO(netdev);
2496
2497         info->so_timestamping =
2498 #ifdef PTP_HARDWARE_TIMESTAMPING
2499                 SOF_TIMESTAMPING_TX_HARDWARE |
2500                 SOF_TIMESTAMPING_RX_HARDWARE |
2501                 SOF_TIMESTAMPING_RAW_HARDWARE |
2502                 SOF_TIMESTAMPING_TX_SOFTWARE |
2503 #endif
2504                 SOF_TIMESTAMPING_RX_SOFTWARE |
2505                 SOF_TIMESTAMPING_SOFTWARE;
2506
2507         if (lio->ptp_clock)
2508                 info->phc_index = ptp_clock_index(lio->ptp_clock);
2509         else
2510                 info->phc_index = -1;
2511
2512 #ifdef PTP_HARDWARE_TIMESTAMPING
2513         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
2514
2515         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2516                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2517                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
2518                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
2519 #endif
2520
2521         return 0;
2522 }
2523
2524 /* Return register dump len. */
2525 static int lio_get_regs_len(struct net_device *dev)
2526 {
2527         struct lio *lio = GET_LIO(dev);
2528         struct octeon_device *oct = lio->oct_dev;
2529
2530         switch (oct->chip_id) {
2531         case OCTEON_CN23XX_PF_VID:
2532                 return OCT_ETHTOOL_REGDUMP_LEN_23XX;
2533         case OCTEON_CN23XX_VF_VID:
2534                 return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
2535         default:
2536                 return OCT_ETHTOOL_REGDUMP_LEN;
2537         }
2538 }
2539
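/* Format the CN23XX PF CSR values into the regdump buffer 's'; the output
 * is expected to fit within OCT_ETHTOOL_REGDUMP_LEN_23XX.
 */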
2540 static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
2541 {
2542         u32 reg;
2543         u8 pf_num = oct->pf_num;
2544         int len = 0;
2545         int i;
2546
2547         /* PCI  Window Registers */
2548
2549         len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2550
2551         /*0x29030 or 0x29040*/
2552         reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
2553         len += sprintf(s + len,
2554                        "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
2555                        reg, oct->pcie_port, oct->pf_num,
2556                        (u64)octeon_read_csr64(oct, reg));
2557
2558         /*0x27080 or 0x27090*/
2559         reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
2560         len +=
2561             sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
2562                     reg, oct->pcie_port, oct->pf_num,
2563                     (u64)octeon_read_csr64(oct, reg));
2564
2565         /*0x27000 or 0x27010*/
2566         reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
2567         len +=
2568             sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
2569                     reg, oct->pcie_port, oct->pf_num,
2570                     (u64)octeon_read_csr64(oct, reg));
2571
2572         /*0x29120*/
2573         reg = 0x29120;
2574         len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
2575                        (u64)octeon_read_csr64(oct, reg));
2576
2577         /*0x27300*/
2578         reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2579               (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2580         len += sprintf(
2581             s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
2582             oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
2583
2584         /*0x27200*/
2585         reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2586               (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2587         len += sprintf(s + len,
2588                        "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
2589                        reg, oct->pcie_port, oct->pf_num,
2590                        (u64)octeon_read_csr64(oct, reg));
2591
2592         /*29130*/
2593         reg = CN23XX_SLI_PKT_CNT_INT;
2594         len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
2595                        (u64)octeon_read_csr64(oct, reg));
2596
2597         /*0x29140*/
2598         reg = CN23XX_SLI_PKT_TIME_INT;
2599         len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
2600                        (u64)octeon_read_csr64(oct, reg));
2601
2602         /*0x29160*/
2603         reg = 0x29160;
2604         len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
2605                        (u64)octeon_read_csr64(oct, reg));
2606
2607         /*0x29180*/
2608         reg = CN23XX_SLI_OQ_WMARK;
2609         len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
2610                        reg, (u64)octeon_read_csr64(oct, reg));
2611
2612         /*0x291E0*/
2613         reg = CN23XX_SLI_PKT_IOQ_RING_RST;
2614         len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
2615                        (u64)octeon_read_csr64(oct, reg));
2616
2617         /*0x29210*/
2618         reg = CN23XX_SLI_GBL_CONTROL;
2619         len += sprintf(s + len,
2620                        "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
2621                        (u64)octeon_read_csr64(oct, reg));
2622
2623         /*0x29220*/
2624         reg = 0x29220;
2625         len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
2626                        reg, (u64)octeon_read_csr64(oct, reg));
2627
2628         /*PF only*/
2629         if (pf_num == 0) {
2630                 /*0x29260*/
2631                 reg = CN23XX_SLI_OUT_BP_EN_W1S;
2632                 len += sprintf(s + len,
2633                                "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S):  %016llx\n",
2634                                reg, (u64)octeon_read_csr64(oct, reg));
2635         } else if (pf_num == 1) {
2636                 /*0x29270*/
2637                 reg = CN23XX_SLI_OUT_BP_EN2_W1S;
2638                 len += sprintf(s + len,
2639                                "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
2640                                reg, (u64)octeon_read_csr64(oct, reg));
2641         }
2642
2643         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2644                 reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
2645                 len +=
2646                     sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2647                             reg, i, (u64)octeon_read_csr64(oct, reg));
2648         }
2649
2650         /*0x10040*/
2651         for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2652                 reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
2653                 len += sprintf(s + len,
2654                                "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2655                                reg, i, (u64)octeon_read_csr64(oct, reg));
2656         }
2657
2658         /*0x10080*/
2659         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2660                 reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
2661                 len += sprintf(s + len,
2662                                "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2663                                reg, i, (u64)octeon_read_csr64(oct, reg));
2664         }
2665
2666         /*0x10090*/
2667         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2668                 reg = CN23XX_SLI_OQ_SIZE(i);
2669                 len += sprintf(
2670                     s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2671                     reg, i, (u64)octeon_read_csr64(oct, reg));
2672         }
2673
2674         /*0x10050*/
2675         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2676                 reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
2677                 len += sprintf(
2678                         s + len,
2679                         "\n[%08x] (SLI_PKT%d_OUTPUT_CONTROL): %016llx\n",
2680                         reg, i, (u64)octeon_read_csr64(oct, reg));
2681         }
2682
2683         /*0x10070*/
2684         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2685                 reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
2686                 len += sprintf(s + len,
2687                                "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2688                                reg, i, (u64)octeon_read_csr64(oct, reg));
2689         }
2690
2691         /*0x100a0*/
2692         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2693                 reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
2694                 len += sprintf(s + len,
2695                                "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2696                                reg, i, (u64)octeon_read_csr64(oct, reg));
2697         }
2698
2699         /*0x100b0*/
2700         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2701                 reg = CN23XX_SLI_OQ_PKTS_SENT(i);
2702                 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2703                                reg, i, (u64)octeon_read_csr64(oct, reg));
2704         }
2705
2706         /*0x100c0*/
2707         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2708                 reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
2709                 len += sprintf(s + len,
2710                                "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2711                                reg, i, (u64)octeon_read_csr64(oct, reg));
2712         }
2713
2714         /*0x10000*/
2715         for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2716                 reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
2717                 len += sprintf(s + len,
2718                                "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2719                                reg, i, (u64)octeon_read_csr64(oct, reg));
2720         }
2721
2722         /*0x10010*/
2723         for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2724                 reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
2725                 len += sprintf(
2726                     s + len,
2727                     "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg,
2728                     i, (u64)octeon_read_csr64(oct, reg));
2729         }
2730
2731         /*0x10020*/
2732         for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2733                 reg = CN23XX_SLI_IQ_DOORBELL(i);
2734                 len += sprintf(
2735                     s + len,
2736                     "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2737                     reg, i, (u64)octeon_read_csr64(oct, reg));
2738         }
2739
2740         /*0x10030*/
2741         for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2742                 reg = CN23XX_SLI_IQ_SIZE(i);
2743                 len += sprintf(
2744                     s + len,
2745                     "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2746                     reg, i, (u64)octeon_read_csr64(oct, reg));
2747         }
2748
2749         /*0x10040*/
2750         for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2751                 reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
2752                 len += sprintf(s + len,
2753                                "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2754                                reg, i, (u64)octeon_read_csr64(oct, reg));
2755         }
2756
2757         return len;
2758 }
2759
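/* Like cn23xx_read_csr_reg(), but for a CN23XX VF: only the rings assigned
 * to this VF (oct->sriov_info.rings_per_vf) are walked, using the VF
 * register offsets, and each CSR is printed as "[address] (NAME): value"
 * into the caller-supplied buffer.
 */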
2760 static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
2761 {
2762         int len = 0;
2763         u32 reg;
2764         int i;
2765
2766         /* SLI CSRs for the rings owned by this VF */
2767
2768         len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2769
2770         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2771                 reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
2772                 len += sprintf(s + len,
2773                                "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2774                                reg, i, (u64)octeon_read_csr64(oct, reg));
2775         }
2776
2777         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2778                 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2779                 len += sprintf(s + len,
2780                                "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2781                                reg, i, (u64)octeon_read_csr64(oct, reg));
2782         }
2783
2784         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2785                 reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
2786                 len += sprintf(s + len,
2787                                "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2788                                reg, i, (u64)octeon_read_csr64(oct, reg));
2789         }
2790
2791         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2792                 reg = CN23XX_VF_SLI_OQ_SIZE(i);
2793                 len += sprintf(s + len,
2794                                "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2795                                reg, i, (u64)octeon_read_csr64(oct, reg));
2796         }
2797
2798         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2799                 reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
2800                 len += sprintf(s + len,
2801                                "\n[%08x] (SLI_PKT%d_OUTPUT_CONTROL): %016llx\n",
2802                                reg, i, (u64)octeon_read_csr64(oct, reg));
2803         }
2804
2805         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2806                 reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
2807                 len += sprintf(s + len,
2808                                "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2809                                reg, i, (u64)octeon_read_csr64(oct, reg));
2810         }
2811
2812         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2813                 reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
2814                 len += sprintf(s + len,
2815                                "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2816                                reg, i, (u64)octeon_read_csr64(oct, reg));
2817         }
2818
2819         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2820                 reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
2821                 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2822                                reg, i, (u64)octeon_read_csr64(oct, reg));
2823         }
2824
2825         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2826                 reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
2827                 len += sprintf(s + len,
2828                                "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2829                                reg, i, (u64)octeon_read_csr64(oct, reg));
2830         }
2831
2832         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2833                 reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
2834                 len += sprintf(s + len,
2835                                "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
2836                                reg, i, (u64)octeon_read_csr64(oct, reg));
2837         }
2838
2839         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2840                 reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
2841                 len += sprintf(s + len,
2842                                "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2843                                reg, i, (u64)octeon_read_csr64(oct, reg));
2844         }
2845
2846         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2847                 reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
2848                 len += sprintf(s + len,
2849                                "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
2850                                reg, i, (u64)octeon_read_csr64(oct, reg));
2851         }
2852
2853         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2854                 reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
2855                 len += sprintf(s + len,
2856                                "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2857                                reg, i, (u64)octeon_read_csr64(oct, reg));
2858         }
2859
2860         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2861                 reg = CN23XX_VF_SLI_IQ_SIZE(i);
2862                 len += sprintf(s + len,
2863                                "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2864                                reg, i, (u64)octeon_read_csr64(oct, reg));
2865         }
2866
2867         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2868                 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2869                 len += sprintf(s + len,
2870                                "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2871                                reg, i, (u64)octeon_read_csr64(oct, reg));
2872         }
2873
2874         return len;
2875 }
2876
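/* CSR dump for the older CN66XX/CN68XX devices: PCI window registers,
 * interrupt enable/summary, per-queue output and input registers, DMA
 * counters and the BAR1 index table, formatted as "[address] (NAME): value".
 */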
2877 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
2878 {
2879         u32 reg;
2880         int i, len = 0;
2881
2882         /* PCI  Window Registers */
2883
2884         len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2885         reg = CN6XXX_WIN_WR_ADDR_LO;
2886         len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
2887                        CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
2888         reg = CN6XXX_WIN_WR_ADDR_HI;
2889         len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
2890                        CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
2891         reg = CN6XXX_WIN_RD_ADDR_LO;
2892         len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
2893                        CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
2894         reg = CN6XXX_WIN_RD_ADDR_HI;
2895         len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
2896                        CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
2897         reg = CN6XXX_WIN_WR_DATA_LO;
2898         len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
2899                        CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
2900         reg = CN6XXX_WIN_WR_DATA_HI;
2901         len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
2902                        CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
2903         len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
2904                        CN6XXX_WIN_WR_MASK_REG,
2905                        octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
2906
2907         /* PCI  Interrupt Register */
2908         len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
2909                        CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
2910                                                 CN6XXX_SLI_INT_ENB64_PORT0));
2911         len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
2912                        CN6XXX_SLI_INT_ENB64_PORT1,
2913                        octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
2914         len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
2915                        octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
2916
2917         /* PCI  Output queue registers */
2918         for (i = 0; i < oct->num_oqs; i++) {
2919                 reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
2920                 len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
2921                                reg, i, octeon_read_csr(oct, reg));
2922                 reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
2923                 len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
2924                                reg, i, octeon_read_csr(oct, reg));
2925         }
2926         reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
2927         len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
2928                        reg, octeon_read_csr(oct, reg));
2929         reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
2930         len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
2931                        reg, octeon_read_csr(oct, reg));
2932
2933         /* PCI  Input queue registers */
2934         for (i = 0; i <= 3; i++) {
2935                 u32 reg;
2936
2937                 reg = CN6XXX_SLI_IQ_DOORBELL(i);
2938                 len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
2939                                reg, i, octeon_read_csr(oct, reg));
2940                 reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
2941                 len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
2942                                reg, i, octeon_read_csr(oct, reg));
2943         }
2944
2945         /* PCI  DMA registers */
2946
2947         len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
2948                        CN6XXX_DMA_CNT(0),
2949                        octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
2950         reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
2951         len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
2952                        CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
2953         reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
2954         len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
2955                        CN6XXX_DMA_TIME_INT_LEVEL(0),
2956                        octeon_read_csr(oct, reg));
2957
2958         len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
2959                        CN6XXX_DMA_CNT(1),
2960                        octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
2961         reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2962         len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
2963                        CN6XXX_DMA_PKT_INT_LEVEL(1),
2964                        octeon_read_csr(oct, reg));
2965         reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
2966         len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
2967                        CN6XXX_DMA_TIME_INT_LEVEL(1),
2968                        octeon_read_csr(oct, reg));
2969
2970         /* PCI  Index registers */
2971
2972         len += sprintf(s + len, "\n");
2973
2974         for (i = 0; i < 16; i++) {
2975                 reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
2976                 len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
2977                                (u64)CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
2978         }
2979
2980         return len;
2981 }
2982
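/* Dump a subset of PCI config space (dwords 0-13 and 30-34) for
 * CN66XX/CN68XX; lio_get_regs() appends this after the CSR dump.
 */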
2983 static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
2984 {
2985         u32 val;
2986         int i, len = 0;
2987
2988         /* PCI CONFIG Registers */
2989
2990         len += sprintf(s + len,
2991                        "\n\t Octeon Config space Registers\n\n");
2992
2993         for (i = 0; i <= 13; i++) {
2994                 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2995                 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2996                                (i * 4), i, val);
2997         }
2998
2999         for (i = 30; i <= 34; i++) {
3000                 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
3001                 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
3002                                (i * 4), i, val);
3003         }
3004
3005         return len;
3006 }
3007
3008 /* Return register dump to user app. */
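/* The buffer is sized by get_regs_len() and, for this driver, is filled
 * with ASCII text rather than raw register words, so from user space it is
 * most readable via something like "ethtool -d <iface> raw on" (the exact
 * invocation depends on the installed ethtool version).
 */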
3009 static void lio_get_regs(struct net_device *dev,
3010                          struct ethtool_regs *regs, void *regbuf)
3011 {
3012         struct lio *lio = GET_LIO(dev);
3013         int len = 0;
3014         struct octeon_device *oct = lio->oct_dev;
3015
3016         regs->version = OCT_ETHTOOL_REGSVER;
3017
3018         switch (oct->chip_id) {
3019         case OCTEON_CN23XX_PF_VID:
3020                 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
3021                 len += cn23xx_read_csr_reg(regbuf + len, oct);
3022                 break;
3023         case OCTEON_CN23XX_VF_VID:
3024                 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
3025                 len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
3026                 break;
3027         case OCTEON_CN68XX:
3028         case OCTEON_CN66XX:
3029                 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
3030                 len += cn6xxx_read_csr_reg(regbuf + len, oct);
3031                 len += cn6xxx_read_config_reg(regbuf + len, oct);
3032                 break;
3033         default:
3034                 dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
3035                         __func__, oct->chip_id);
3036         }
3037 }
3038
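/* Driver private flags, toggled from user space with
 * "ethtool --set-priv-flags <iface> <flag> on|off" and read back with
 * "ethtool --show-priv-flags <iface>"; the flag names come from the
 * priv-flags string table exposed through get_strings(). Only
 * OCT_PRIV_FLAG_TX_BYTES (whether interrupts are driven by TX byte count,
 * see intr_by_tx_bytes below) is acted on in lio_set_priv_flags().
 */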
3039 static u32 lio_get_priv_flags(struct net_device *netdev)
3040 {
3041         struct lio *lio = GET_LIO(netdev);
3042
3043         return lio->oct_dev->priv_flags;
3044 }
3045
3046 static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
3047 {
3048         struct lio *lio = GET_LIO(netdev);
3049         bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
3050
3051         lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
3052                           intr_by_tx_bytes);
3053         return 0;
3054 }
3055
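/* FEC control is only meaningful on the 25G CN2350/CN2360 boards. From user
 * space this corresponds to "ethtool --show-fec <iface>" and
 * "ethtool --set-fec <iface> encoding rs|off"; only RS and "off" are
 * supported here, and lio_set_fecparam() returns -EOPNOTSUPP for anything
 * else.
 */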
3056 static int lio_get_fecparam(struct net_device *netdev,
3057                             struct ethtool_fecparam *fec)
3058 {
3059         struct lio *lio = GET_LIO(netdev);
3060         struct octeon_device *oct = lio->oct_dev;
3061
3062         fec->active_fec = ETHTOOL_FEC_NONE;
3063         fec->fec = ETHTOOL_FEC_NONE;
3064
3065         if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
3066             oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
3067                 if (oct->no_speed_setting == 1)
3068                         return 0;
3069
3070                 liquidio_get_fec(lio);
3071                 fec->fec = (ETHTOOL_FEC_RS | ETHTOOL_FEC_OFF);
3072                 if (oct->props[lio->ifidx].fec == 1)
3073                         fec->active_fec = ETHTOOL_FEC_RS;
3074                 else
3075                         fec->active_fec = ETHTOOL_FEC_OFF;
3076         }
3077
3078         return 0;
3079 }
3080
3081 static int lio_set_fecparam(struct net_device *netdev,
3082                             struct ethtool_fecparam *fec)
3083 {
3084         struct lio *lio = GET_LIO(netdev);
3085         struct octeon_device *oct = lio->oct_dev;
3086
3087         if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
3088             oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
3089                 if (oct->no_speed_setting == 1)
3090                         return -EOPNOTSUPP;
3091
3092                 if (fec->fec & ETHTOOL_FEC_OFF)
3093                         liquidio_set_fec(lio, 0);
3094                 else if (fec->fec & ETHTOOL_FEC_RS)
3095                         liquidio_set_fec(lio, 1);
3096                 else
3097                         return -EOPNOTSUPP;
3098         } else {
3099                 return -EOPNOTSUPP;
3100         }
3101
3102         return 0;
3103 }
3104
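/* Coalescing parameters this driver understands, e.g.
 * "ethtool -C <iface> adaptive-rx on rx-usecs 64 rx-frames 32". The ethtool
 * core checks set_coalesce requests against this mask and rejects changes
 * to any field not listed here before lio_set_intr_coalesce() is called.
 */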
3105 #define LIO_ETHTOOL_COALESCE    (ETHTOOL_COALESCE_RX_USECS |            \
3106                                  ETHTOOL_COALESCE_MAX_FRAMES |          \
3107                                  ETHTOOL_COALESCE_USE_ADAPTIVE |        \
3108                                  ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW |   \
3109                                  ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW |   \
3110                                  ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH |  \
3111                                  ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH |  \
3112                                  ETHTOOL_COALESCE_PKT_RATE_RX_USECS)
3113
3114 static const struct ethtool_ops lio_ethtool_ops = {
3115         .supported_coalesce_params = LIO_ETHTOOL_COALESCE,
3116         .get_link_ksettings     = lio_get_link_ksettings,
3117         .set_link_ksettings     = lio_set_link_ksettings,
3118         .get_fecparam           = lio_get_fecparam,
3119         .set_fecparam           = lio_set_fecparam,
3120         .get_link               = ethtool_op_get_link,
3121         .get_drvinfo            = lio_get_drvinfo,
3122         .get_ringparam          = lio_ethtool_get_ringparam,
3123         .set_ringparam          = lio_ethtool_set_ringparam,
3124         .get_channels           = lio_ethtool_get_channels,
3125         .set_channels           = lio_ethtool_set_channels,
3126         .set_phys_id            = lio_set_phys_id,
3127         .get_eeprom_len         = lio_get_eeprom_len,
3128         .get_eeprom             = lio_get_eeprom,
3129         .get_strings            = lio_get_strings,
3130         .get_ethtool_stats      = lio_get_ethtool_stats,
3131         .get_pauseparam         = lio_get_pauseparam,
3132         .set_pauseparam         = lio_set_pauseparam,
3133         .get_regs_len           = lio_get_regs_len,
3134         .get_regs               = lio_get_regs,
3135         .get_msglevel           = lio_get_msglevel,
3136         .set_msglevel           = lio_set_msglevel,
3137         .get_sset_count         = lio_get_sset_count,
3138         .get_coalesce           = lio_get_intr_coalesce,
3139         .set_coalesce           = lio_set_intr_coalesce,
3140         .get_priv_flags         = lio_get_priv_flags,
3141         .set_priv_flags         = lio_set_priv_flags,
3142         .get_ts_info            = lio_get_ts_info,
3143 };
3144
3145 static const struct ethtool_ops lio_vf_ethtool_ops = {
3146         .supported_coalesce_params = LIO_ETHTOOL_COALESCE,
3147         .get_link_ksettings     = lio_get_link_ksettings,
3148         .get_link               = ethtool_op_get_link,
3149         .get_drvinfo            = lio_get_vf_drvinfo,
3150         .get_ringparam          = lio_ethtool_get_ringparam,
3151         .set_ringparam          = lio_ethtool_set_ringparam,
3152         .get_channels           = lio_ethtool_get_channels,
3153         .set_channels           = lio_ethtool_set_channels,
3154         .get_strings            = lio_vf_get_strings,
3155         .get_ethtool_stats      = lio_vf_get_ethtool_stats,
3156         .get_regs_len           = lio_get_regs_len,
3157         .get_regs               = lio_get_regs,
3158         .get_msglevel           = lio_get_msglevel,
3159         .set_msglevel           = lio_vf_set_msglevel,
3160         .get_sset_count         = lio_vf_get_sset_count,
3161         .get_coalesce           = lio_get_intr_coalesce,
3162         .set_coalesce           = lio_set_intr_coalesce,
3163         .get_priv_flags         = lio_get_priv_flags,
3164         .set_priv_flags         = lio_set_priv_flags,
3165         .get_ts_info            = lio_get_ts_info,
3166 };
3167
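/* Select the PF or VF ethtool_ops for this netdev; the VF table omits the
 * operations a VF cannot perform (writing link settings, pause, FEC,
 * EEPROM access and LED control).
 */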
3168 void liquidio_set_ethtool_ops(struct net_device *netdev)
3169 {
3170         struct lio *lio = GET_LIO(netdev);
3171         struct octeon_device *oct = lio->oct_dev;
3172
3173         if (OCTEON_CN23XX_VF(oct))
3174                 netdev->ethtool_ops = &lio_vf_ethtool_ops;
3175         else
3176                 netdev->ethtool_ops = &lio_ethtool_ops;
3177 }