c0e6ab42e0e1dac088f0496fc3fbbc0a65ede993
[sfrench/cifs-2.6.git] / drivers / net / ethernet / intel / ixgbe / ixgbe_ethtool.c
1 /*******************************************************************************
2
3   Intel 10 Gigabit PCI Express Linux driver
4   Copyright(c) 1999 - 2016 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   Linux NICS <linux.nics@intel.com>
24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 /* ethtool support for ixgbe */
30
31 #include <linux/interrupt.h>
32 #include <linux/types.h>
33 #include <linux/module.h>
34 #include <linux/slab.h>
35 #include <linux/pci.h>
36 #include <linux/netdevice.h>
37 #include <linux/ethtool.h>
38 #include <linux/vmalloc.h>
39 #include <linux/highmem.h>
40 #include <linux/uaccess.h>
41
42 #include "ixgbe.h"
43 #include "ixgbe_phy.h"
44
45
46 #define IXGBE_ALL_RAR_ENTRIES 16
47
/* Which structure an ethtool statistic is sourced from: the generic
 * rtnl_link_stats64 netdev counters, or the driver's ixgbe_adapter stats.
 */
enum {NETDEV_STATS, IXGBE_STATS};

/* One entry of the ethtool statistics table: the name shown by
 * "ethtool -S", the source structure (type above), and the size and
 * byte offset of the member so it can be fetched generically.
 */
struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];	/* display name */
	int type;				/* NETDEV_STATS or IXGBE_STATS */
	int sizeof_stat;			/* sizeof() of the member */
	int stat_offset;			/* offsetof() of the member */
};
56
/* Initializer helpers for struct ixgbe_stats: expand to the
 * {type, sizeof, offsetof} triple for a member of ixgbe_adapter
 * (IXGBE_STAT) or rtnl_link_stats64 (IXGBE_NETDEV_STAT).
 */
#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)
63
/* Table of all statistics exported through ethtool: mixes generic netdev
 * counters with driver/hardware counters from adapter->stats. Order here
 * defines the order of both the strings and the values reported.
 */
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_length_errors", IXGBE_STAT(stats.rlec)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
	{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
	{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
	{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
	{"tx_ipsec", IXGBE_STAT(tx_ipsec)},
	{"rx_ipsec", IXGBE_STAT(rx_ipsec)},
#ifdef IXGBE_FCOE
	/* FCoE counters are only built in when FCoE support is enabled */
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};
132
/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

/* Number of u64 per-queue counters across all Tx + Rx queues; expands
 * against a local "netdev" in the surrounding function.
 */
#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
/* Number of u64 priority flow control counters (XON/XOFF, Rx/Tx arrays) */
#define IXGBE_PB_STATS_LEN ( \
			(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
			/ sizeof(u64))
/* Total number of stats reported via ethtool -S */
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)

/* Names reported for the ethtool self-test results */
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)", "Eeprom test    (offline)",
	"Interrupt test (offline)", "Loopback test  (offline)",
	"Link test   (on/offline)"
};
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN

/* Driver private flag names (ethtool --show-priv-flags); each string's
 * index corresponds to the bit defined next to it.
 */
static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBE_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
};

#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
167
/* currently supported speeds for 10G */
#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
			 SUPPORTED_10000baseKX4_Full | \
			 SUPPORTED_10000baseKR_Full)

/* true when the PHY media type is a backplane (KX4/KR) connection */
#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
174
175 static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
176 {
177         if (!ixgbe_isbackplane(hw->phy.media_type))
178                 return SUPPORTED_10000baseT_Full;
179
180         switch (hw->device_id) {
181         case IXGBE_DEV_ID_82598:
182         case IXGBE_DEV_ID_82599_KX4:
183         case IXGBE_DEV_ID_82599_KX4_MEZZ:
184         case IXGBE_DEV_ID_X550EM_X_KX4:
185                 return SUPPORTED_10000baseKX4_Full;
186         case IXGBE_DEV_ID_82598_BX:
187         case IXGBE_DEV_ID_82599_KR:
188         case IXGBE_DEV_ID_X550EM_X_KR:
189         case IXGBE_DEV_ID_X550EM_X_XFI:
190                 return SUPPORTED_10000baseKR_Full;
191         default:
192                 return SUPPORTED_10000baseKX4_Full |
193                        SUPPORTED_10000baseKR_Full;
194         }
195 }
196
/**
 * ixgbe_get_link_ksettings - report link settings to ethtool
 * @netdev: network interface device structure
 * @cmd: ethtool link settings to fill in
 *
 * Builds the supported/advertised link-mode masks from the MAC's link
 * capabilities and the PHY/SFP type, fills in port type, autoneg state,
 * pause advertisement and - when the carrier is up - the current speed
 * and duplex.  Always returns 0.
 */
static int ixgbe_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	bool autoneg = false;
	u32 supported, advertising;

	/* start from whatever is already in cmd's supported mask */
	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
		supported |= ixgbe_get_supported_10gtypes(hw);
	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
		supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
				   SUPPORTED_1000baseKX_Full :
				   SUPPORTED_1000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_10_FULL)
		supported |= SUPPORTED_10baseT_Full;

	/* default advertised speed if phy.autoneg_advertised isn't set */
	advertising = supported;
	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		advertising = 0;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
			advertising |= ADVERTISED_10baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			advertising |= ADVERTISED_100baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			advertising |= supported & ADVRTSD_MSK_10G;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
			/* advertise the 1G type that is actually supported */
			if (supported & SUPPORTED_1000baseKX_Full)
				advertising |= ADVERTISED_1000baseKX_Full;
			else
				advertising |= ADVERTISED_1000baseT_Full;
		}
	} else {
		/* multispeed fiber without autoneg: pin to 10G if capable */
		if (hw->phy.multispeed_fiber && !autoneg) {
			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
				advertising = ADVERTISED_10000baseT_Full;
		}
	}

	if (autoneg) {
		supported |= SUPPORTED_Autoneg;
		advertising |= ADVERTISED_Autoneg;
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else
		cmd->base.autoneg = AUTONEG_DISABLE;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_x550em_ext_t:
	case ixgbe_phy_fw:
	case ixgbe_phy_cu_unknown:
		/* copper PHYs report twisted pair */
		supported |= SUPPORTED_TP;
		advertising |= ADVERTISED_TP;
		cmd->base.port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			/* direct attach copper */
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
		case ixgbe_sfp_type_1g_lx_core0:
		case ixgbe_sfp_type_1g_lx_core1:
			/* optical modules */
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			/* cage is empty */
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			/* 1G copper SFP */
			supported |= SUPPORTED_TP;
			advertising |= ADVERTISED_TP;
			cmd->base.port = PORT_TP;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_OTHER;
		break;
	}

	/* Indicate pause support */
	supported |= SUPPORTED_Pause;

	/* translate requested flow-control mode into pause advertisement */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_full:
		advertising |= ADVERTISED_Pause;
		break;
	case ixgbe_fc_rx_pause:
		advertising |= ADVERTISED_Pause |
				     ADVERTISED_Asym_Pause;
		break;
	case ixgbe_fc_tx_pause:
		advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		advertising &= ~(ADVERTISED_Pause |
				       ADVERTISED_Asym_Pause);
	}

	if (netif_carrier_ok(netdev)) {
		/* link is up: report the negotiated speed, always full duplex */
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			cmd->base.speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			cmd->base.speed = SPEED_5000;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			cmd->base.speed = SPEED_2500;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			cmd->base.speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			cmd->base.speed = SPEED_100;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			cmd->base.speed = SPEED_10;
			break;
		default:
			break;
		}
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	/* write the legacy u32 masks back into the link-mode bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
391
/**
 * ixgbe_set_link_ksettings - apply link settings requested via ethtool
 * @netdev: network interface device structure
 * @cmd: requested link settings
 *
 * For copper and multispeed-fiber media, restricts the advertised speed
 * mask and restarts link setup; duplex forcing is not supported.  For
 * other media only 10Gb/Full without autoneg is accepted.  Returns 0 on
 * success or a negative/MAC-layer error code.
 */
static int ixgbe_set_link_ksettings(struct net_device *netdev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (advertising & ~supported)
			return -EINVAL;

		/* only allow one speed at a time if no autoneg */
		if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
			if (advertising ==
			    (ADVERTISED_10000baseT_Full |
			     ADVERTISED_1000baseT_Full))
				return -EINVAL;
		}

		/* translate the ethtool mask into IXGBE_LINK_SPEED_* bits */
		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;

		if (advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (advertising & ADVERTISED_10baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10_FULL;

		/* nothing to do if the advertised mask is unchanged */
		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			usleep_range(1000, 2000);

		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			/* on failure, restore the previous advertised mask */
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = cmd->base.speed;

		if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
		    (advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}
462
463 static void ixgbe_get_pauseparam(struct net_device *netdev,
464                                  struct ethtool_pauseparam *pause)
465 {
466         struct ixgbe_adapter *adapter = netdev_priv(netdev);
467         struct ixgbe_hw *hw = &adapter->hw;
468
469         if (ixgbe_device_supports_autoneg_fc(hw) &&
470             !hw->fc.disable_fc_autoneg)
471                 pause->autoneg = 1;
472         else
473                 pause->autoneg = 0;
474
475         if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
476                 pause->rx_pause = 1;
477         } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
478                 pause->tx_pause = 1;
479         } else if (hw->fc.current_mode == ixgbe_fc_full) {
480                 pause->rx_pause = 1;
481                 pause->tx_pause = 1;
482         }
483 }
484
/**
 * ixgbe_set_pauseparam - apply flow control settings from ethtool
 * @netdev: network interface device structure
 * @pause: requested pause parameters
 *
 * Validates the request against hardware capabilities, computes the new
 * requested flow-control mode, and reinitializes or resets the adapter
 * when the configuration actually changed.  Returns 0 or -EINVAL.
 */
static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	/* work on a copy so hw->fc is only updated if something changed */
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does no support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    !ixgbe_device_supports_autoneg_fc(hw))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	/* autoneg requests start from full and let negotiation resolve it */
	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if the thing changed then we'll update and use new autoneg */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
524
525 static u32 ixgbe_get_msglevel(struct net_device *netdev)
526 {
527         struct ixgbe_adapter *adapter = netdev_priv(netdev);
528         return adapter->msg_enable;
529 }
530
531 static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
532 {
533         struct ixgbe_adapter *adapter = netdev_priv(netdev);
534         adapter->msg_enable = data;
535 }
536
/* Size in bytes of the register dump produced by ixgbe_get_regs(). */
static int ixgbe_get_regs_len(struct net_device *netdev)
{
/* number of u32 registers copied out by ixgbe_get_regs() */
#define IXGBE_REGS_LEN	1139
	return IXGBE_REGS_LEN * sizeof(u32);
}

/* Shorthand for reading a member of adapter->stats */
#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
544
545 static void ixgbe_get_regs(struct net_device *netdev,
546                            struct ethtool_regs *regs, void *p)
547 {
548         struct ixgbe_adapter *adapter = netdev_priv(netdev);
549         struct ixgbe_hw *hw = &adapter->hw;
550         u32 *regs_buff = p;
551         u8 i;
552
553         memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
554
555         regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
556                         hw->device_id;
557
558         /* General Registers */
559         regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
560         regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
561         regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
562         regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
563         regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
564         regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
565         regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
566         regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
567
568         /* NVM Register */
569         regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
570         regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
571         regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
572         regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
573         regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
574         regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
575         regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
576         regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
577         regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
578         regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
579
580         /* Interrupt */
581         /* don't read EICR because it can clear interrupt causes, instead
582          * read EICS which is a shadow but doesn't clear EICR */
583         regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
584         regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
585         regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
586         regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
587         regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
588         regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
589         regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
590         regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
591         regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
592         regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
593         regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
594         regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
595
596         /* Flow Control */
597         regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
598         for (i = 0; i < 4; i++)
599                 regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
600         for (i = 0; i < 8; i++) {
601                 switch (hw->mac.type) {
602                 case ixgbe_mac_82598EB:
603                         regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
604                         regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
605                         break;
606                 case ixgbe_mac_82599EB:
607                 case ixgbe_mac_X540:
608                 case ixgbe_mac_X550:
609                 case ixgbe_mac_X550EM_x:
610                 case ixgbe_mac_x550em_a:
611                         regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
612                         regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
613                         break;
614                 default:
615                         break;
616                 }
617         }
618         regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
619         regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
620
621         /* Receive DMA */
622         for (i = 0; i < 64; i++)
623                 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
624         for (i = 0; i < 64; i++)
625                 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
626         for (i = 0; i < 64; i++)
627                 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
628         for (i = 0; i < 64; i++)
629                 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
630         for (i = 0; i < 64; i++)
631                 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
632         for (i = 0; i < 64; i++)
633                 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
634         for (i = 0; i < 16; i++)
635                 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
636         for (i = 0; i < 16; i++)
637                 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
638         regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
639         for (i = 0; i < 8; i++)
640                 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
641         regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
642         regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
643
644         /* Receive */
645         regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
646         regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
647         for (i = 0; i < 16; i++)
648                 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
649         for (i = 0; i < 16; i++)
650                 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
651         regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
652         regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
653         regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
654         regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
655         regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
656         regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
657         for (i = 0; i < 8; i++)
658                 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
659         for (i = 0; i < 8; i++)
660                 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
661         regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
662
663         /* Transmit */
664         for (i = 0; i < 32; i++)
665                 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
666         for (i = 0; i < 32; i++)
667                 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
668         for (i = 0; i < 32; i++)
669                 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
670         for (i = 0; i < 32; i++)
671                 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
672         for (i = 0; i < 32; i++)
673                 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
674         for (i = 0; i < 32; i++)
675                 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
676         for (i = 0; i < 32; i++)
677                 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
678         for (i = 0; i < 32; i++)
679                 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
680         regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
681         for (i = 0; i < 16; i++)
682                 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
683         regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
684         for (i = 0; i < 8; i++)
685                 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
686         regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
687
688         /* Wake Up */
689         regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
690         regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
691         regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
692         regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
693         regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
694         regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
695         regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
696         regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
697         regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
698
699         /* DCB */
700         regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
701         regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
702
703         switch (hw->mac.type) {
704         case ixgbe_mac_82598EB:
705                 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
706                 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
707                 for (i = 0; i < 8; i++)
708                         regs_buff[833 + i] =
709                                 IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
710                 for (i = 0; i < 8; i++)
711                         regs_buff[841 + i] =
712                                 IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
713                 for (i = 0; i < 8; i++)
714                         regs_buff[849 + i] =
715                                 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
716                 for (i = 0; i < 8; i++)
717                         regs_buff[857 + i] =
718                                 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
719                 break;
720         case ixgbe_mac_82599EB:
721         case ixgbe_mac_X540:
722         case ixgbe_mac_X550:
723         case ixgbe_mac_X550EM_x:
724         case ixgbe_mac_x550em_a:
725                 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
726                 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
727                 for (i = 0; i < 8; i++)
728                         regs_buff[833 + i] =
729                                 IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
730                 for (i = 0; i < 8; i++)
731                         regs_buff[841 + i] =
732                                 IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
733                 for (i = 0; i < 8; i++)
734                         regs_buff[849 + i] =
735                                 IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
736                 for (i = 0; i < 8; i++)
737                         regs_buff[857 + i] =
738                                 IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
739                 break;
740         default:
741                 break;
742         }
743
744         for (i = 0; i < 8; i++)
745                 regs_buff[865 + i] =
746                 IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
747         for (i = 0; i < 8; i++)
748                 regs_buff[873 + i] =
749                 IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
750
751         /* Statistics */
752         regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
753         regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
754         regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
755         regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
756         for (i = 0; i < 8; i++)
757                 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
758         regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
759         regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
760         regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
761         regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
762         regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
763         regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
764         regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
765         for (i = 0; i < 8; i++)
766                 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
767         for (i = 0; i < 8; i++)
768                 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
769         for (i = 0; i < 8; i++)
770                 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
771         for (i = 0; i < 8; i++)
772                 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
773         regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
774         regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
775         regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
776         regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
777         regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
778         regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
779         regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
780         regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
781         regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
782         regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
783         regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
784         regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
785         regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
786         regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
787         for (i = 0; i < 8; i++)
788                 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
789         regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
790         regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
791         regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
792         regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
793         regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
794         regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
795         regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
796         regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
797         regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
798         regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
799         regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
800         regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
801         regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
802         regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
803         regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
804         regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
805         regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
806         regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
807         regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
808         regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
809         for (i = 0; i < 16; i++)
810                 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
811         for (i = 0; i < 16; i++)
812                 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
813         for (i = 0; i < 16; i++)
814                 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
815         for (i = 0; i < 16; i++)
816                 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
817
818         /* MAC */
819         regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
820         regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
821         regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
822         regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
823         regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
824         regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
825         regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
826         regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
827         regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
828         regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
829         regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
830         regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
831         regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
832         regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
833         regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
834         regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
835         regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
836         regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
837         regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
838         regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
839         regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
840         regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
841         regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
842         regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
843         regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
844         regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
845         regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
846         regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
847         regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
848         regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
849         regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
850         regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
851         regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
852
853         /* Diagnostic */
854         regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
855         for (i = 0; i < 8; i++)
856                 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
857         regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
858         for (i = 0; i < 4; i++)
859                 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
860         regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
861         regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
862         for (i = 0; i < 8; i++)
863                 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
864         regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
865         for (i = 0; i < 4; i++)
866                 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
867         regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
868         regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
869         for (i = 0; i < 4; i++)
870                 regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
871         regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
872         for (i = 0; i < 4; i++)
873                 regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
874         for (i = 0; i < 8; i++)
875                 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
876         regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
877         regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
878         regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
879         regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
880         regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
881         regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
882         regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
883         regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
884         regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
885
886         /* 82599 X540 specific registers  */
887         regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
888
889         /* 82599 X540 specific DCB registers  */
890         regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
891         regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
892         for (i = 0; i < 4; i++)
893                 regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
894         regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
895                                         /* same as RTTQCNRM */
896         regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
897                                         /* same as RTTQCNRR */
898
899         /* X540 specific DCB registers  */
900         regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
901         regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
902 }
903
904 static int ixgbe_get_eeprom_len(struct net_device *netdev)
905 {
906         struct ixgbe_adapter *adapter = netdev_priv(netdev);
907         return adapter->hw.eeprom.word_size * 2;
908 }
909
910 static int ixgbe_get_eeprom(struct net_device *netdev,
911                             struct ethtool_eeprom *eeprom, u8 *bytes)
912 {
913         struct ixgbe_adapter *adapter = netdev_priv(netdev);
914         struct ixgbe_hw *hw = &adapter->hw;
915         u16 *eeprom_buff;
916         int first_word, last_word, eeprom_len;
917         int ret_val = 0;
918         u16 i;
919
920         if (eeprom->len == 0)
921                 return -EINVAL;
922
923         eeprom->magic = hw->vendor_id | (hw->device_id << 16);
924
925         first_word = eeprom->offset >> 1;
926         last_word = (eeprom->offset + eeprom->len - 1) >> 1;
927         eeprom_len = last_word - first_word + 1;
928
929         eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
930         if (!eeprom_buff)
931                 return -ENOMEM;
932
933         ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
934                                              eeprom_buff);
935
936         /* Device's eeprom is always little-endian, word addressable */
937         for (i = 0; i < eeprom_len; i++)
938                 le16_to_cpus(&eeprom_buff[i]);
939
940         memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
941         kfree(eeprom_buff);
942
943         return ret_val;
944 }
945
/* ethtool .set_eeprom: write eeprom->len bytes at eeprom->offset.
 * Because the EEPROM is word (16-bit) addressable, an unaligned first or
 * last byte requires a read/modify/write of the containing word.
 *
 * Returns 0 on success, -EINVAL for a zero-length request or a magic
 * mismatch, -ENOMEM on allocation failure, or the EEPROM op's error.
 */
static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	/* userspace must echo back the magic from ixgbe_get_eeprom() */
	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	/* buffer sized for the whole EEPROM, not just the touched span */
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		/* user data starts at the second byte of the first word */
		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					  &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	/* splice the caller's bytes over the (possibly pre-read) words */
	memcpy(ptr, bytes, eeprom->len);

	/* convert back to device byte order before writing */
	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}
1015
1016 static void ixgbe_get_drvinfo(struct net_device *netdev,
1017                               struct ethtool_drvinfo *drvinfo)
1018 {
1019         struct ixgbe_adapter *adapter = netdev_priv(netdev);
1020
1021         strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
1022         strlcpy(drvinfo->version, ixgbe_driver_version,
1023                 sizeof(drvinfo->version));
1024
1025         strlcpy(drvinfo->fw_version, adapter->eeprom_id,
1026                 sizeof(drvinfo->fw_version));
1027
1028         strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
1029                 sizeof(drvinfo->bus_info));
1030
1031         drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
1032 }
1033
1034 static void ixgbe_get_ringparam(struct net_device *netdev,
1035                                 struct ethtool_ringparam *ring)
1036 {
1037         struct ixgbe_adapter *adapter = netdev_priv(netdev);
1038         struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
1039         struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
1040
1041         ring->rx_max_pending = IXGBE_MAX_RXD;
1042         ring->tx_max_pending = IXGBE_MAX_TXD;
1043         ring->rx_pending = rx_ring->count;
1044         ring->tx_pending = tx_ring->count;
1045 }
1046
/* ethtool .set_ringparam: resize the Tx/XDP/Rx descriptor rings.
 *
 * Requested counts are clamped to driver limits and aligned to the
 * required descriptor multiple.  If the interface is down, only the
 * counts are recorded; otherwise new ring resources are allocated in a
 * temporary array first so the old rings survive an allocation failure,
 * and are swapped in around an ixgbe_down()/ixgbe_up() cycle.
 *
 * Returns 0 on success, -EINVAL for unsupported mini/jumbo params,
 * -ENOMEM on allocation failure.
 */
static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, j, err = 0;
	u32 new_rx_count, new_tx_count;

	/* mini and jumbo rings are not supported by this hardware */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	/* serialize against a concurrent reset */
	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		/* interface is down: no resources allocated yet, so just
		 * record the new counts for the next ixgbe_open()
		 */
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
		  adapter->num_rx_queues);
	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				/* unwind everything set up so far */
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		/* XDP rings continue after the regular Tx rings; note i is
		 * carried over so the unwind below frees both groups
		 */
		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			memcpy(&temp_ring[i], adapter->xdp_ring[j],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		/* all new resources allocated: free old, copy new in place */
		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}
		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			ixgbe_free_tx_resources(adapter->xdp_ring[j]);

			memcpy(adapter->xdp_ring[j], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));

			/* Clear copied XDP RX-queue info */
			memset(&temp_ring[i].xdp_rxq, 0,
			       sizeof(temp_ring[i].xdp_rxq));

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}

		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	ixgbe_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
1192
1193 static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1194 {
1195         switch (sset) {
1196         case ETH_SS_TEST:
1197                 return IXGBE_TEST_LEN;
1198         case ETH_SS_STATS:
1199                 return IXGBE_STATS_LEN;
1200         case ETH_SS_PRIV_FLAGS:
1201                 return IXGBE_PRIV_FLAGS_STR_LEN;
1202         default:
1203                 return -EOPNOTSUPP;
1204         }
1205 }
1206
/* ethtool .get_ethtool_stats: fill @data in the exact order that
 * ixgbe_get_strings() emits names for ETH_SS_STATS: global stats, then
 * per-Tx-queue packets/bytes, per-Rx-queue packets/bytes, and finally
 * per-packet-buffer flow control counters.
 */
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		/* each table entry records which base struct its byte
		 * offset is relative to
		 */
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		/* widen 32-bit counters; 64-bit ones are read directly */
		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			/* unallocated queue still occupies two slots */
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		/* seqcount retry loop gives a consistent packets/bytes pair */
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	/* per-packet-buffer priority flow control counters */
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}
1280
1281 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1282                               u8 *data)
1283 {
1284         char *p = (char *)data;
1285         unsigned int i;
1286
1287         switch (stringset) {
1288         case ETH_SS_TEST:
1289                 for (i = 0; i < IXGBE_TEST_LEN; i++) {
1290                         memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
1291                         data += ETH_GSTRING_LEN;
1292                 }
1293                 break;
1294         case ETH_SS_STATS:
1295                 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1296                         memcpy(p, ixgbe_gstrings_stats[i].stat_string,
1297                                ETH_GSTRING_LEN);
1298                         p += ETH_GSTRING_LEN;
1299                 }
1300                 for (i = 0; i < netdev->num_tx_queues; i++) {
1301                         sprintf(p, "tx_queue_%u_packets", i);
1302                         p += ETH_GSTRING_LEN;
1303                         sprintf(p, "tx_queue_%u_bytes", i);
1304                         p += ETH_GSTRING_LEN;
1305                 }
1306                 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
1307                         sprintf(p, "rx_queue_%u_packets", i);
1308                         p += ETH_GSTRING_LEN;
1309                         sprintf(p, "rx_queue_%u_bytes", i);
1310                         p += ETH_GSTRING_LEN;
1311                 }
1312                 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1313                         sprintf(p, "tx_pb_%u_pxon", i);
1314                         p += ETH_GSTRING_LEN;
1315                         sprintf(p, "tx_pb_%u_pxoff", i);
1316                         p += ETH_GSTRING_LEN;
1317                 }
1318                 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1319                         sprintf(p, "rx_pb_%u_pxon", i);
1320                         p += ETH_GSTRING_LEN;
1321                         sprintf(p, "rx_pb_%u_pxoff", i);
1322                         p += ETH_GSTRING_LEN;
1323                 }
1324                 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1325                 break;
1326         case ETH_SS_PRIV_FLAGS:
1327                 memcpy(data, ixgbe_priv_flags_strings,
1328                        IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
1329         }
1330 }
1331
1332 static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1333 {
1334         struct ixgbe_hw *hw = &adapter->hw;
1335         bool link_up;
1336         u32 link_speed = 0;
1337
1338         if (ixgbe_removed(hw->hw_addr)) {
1339                 *data = 1;
1340                 return 1;
1341         }
1342         *data = 0;
1343
1344         hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1345         if (link_up)
1346                 return *data;
1347         else
1348                 *data = 1;
1349         return *data;
1350 }
1351
/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;	/* register offset (first element for arrays/tables) */
	u8  array_len;	/* number of elements; 1 for a single register */
	u8  test_type;	/* one of the *_TEST defines below */
	u32 mask;	/* mask applied when checking the register */
	u32 write;	/* value/pattern bits used for the write */
};
1360
1361 /* In the hardware, registers are laid out either singly, in arrays
1362  * spaced 0x40 bytes apart, or in contiguous tables.  We assume
1363  * most tests take place on arrays or single registers (handled
1364  * as a single-element array) and special-case the tables.
1365  * Table tests are always pattern tests.
1366  *
1367  * We also make provision for some required setup steps by specifying
1368  * registers to be written without any read-back testing.
1369  */
1370
/* test_type values for struct ixgbe_reg_test entries; see the layout
 * notes above.  WRITE_NO_TEST entries are setup writes with no read-back.
 */
#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
1377
/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	/* ... and disable them again afterwards. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }	/* terminator */
};
1401
/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	/* Disable the queues again once RDT has been tested. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	/* sentinel: reg == 0 terminates the table walk */
	{ .reg = 0 }
};
1429
1430 static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1431                              u32 mask, u32 write)
1432 {
1433         u32 pat, val, before;
1434         static const u32 test_pattern[] = {
1435                 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1436
1437         if (ixgbe_removed(adapter->hw.hw_addr)) {
1438                 *data = 1;
1439                 return true;
1440         }
1441         for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1442                 before = ixgbe_read_reg(&adapter->hw, reg);
1443                 ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
1444                 val = ixgbe_read_reg(&adapter->hw, reg);
1445                 if (val != (test_pattern[pat] & write & mask)) {
1446                         e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1447                               reg, val, (test_pattern[pat] & write & mask));
1448                         *data = reg;
1449                         ixgbe_write_reg(&adapter->hw, reg, before);
1450                         return true;
1451                 }
1452                 ixgbe_write_reg(&adapter->hw, reg, before);
1453         }
1454         return false;
1455 }
1456
1457 static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1458                               u32 mask, u32 write)
1459 {
1460         u32 val, before;
1461
1462         if (ixgbe_removed(adapter->hw.hw_addr)) {
1463                 *data = 1;
1464                 return true;
1465         }
1466         before = ixgbe_read_reg(&adapter->hw, reg);
1467         ixgbe_write_reg(&adapter->hw, reg, write & mask);
1468         val = ixgbe_read_reg(&adapter->hw, reg);
1469         if ((write & mask) != (val & mask)) {
1470                 e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
1471                       reg, (val & mask), (write & mask));
1472                 *data = reg;
1473                 ixgbe_write_reg(&adapter->hw, reg, before);
1474                 return true;
1475         }
1476         ixgbe_write_reg(&adapter->hw, reg, before);
1477         return false;
1478 }
1479
/* ixgbe_reg_test - ethtool offline register self-test
 * @adapter: board private structure
 * @data: failure code output; 0 on success, otherwise 1 or the failing
 *	  register offset (set by the pattern/set-check helpers)
 *
 * First toggles the writable bits of the STATUS register, then walks
 * the MAC-specific register test table.  Returns 0 on success, 1 on
 * the first failure.
 */
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	const struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	/* don't touch registers of a surprise-removed adapter */
	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(drv, "Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	/* pick the STATUS toggle mask and test table for this MAC */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
		break;
	default:
		*data = 1;
		return 1;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
	value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
	/* masked value must be unchanged by the toggle write */
	if (value != after) {
		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
		      after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			/* register arrays are spaced 0x40 apart; table
			 * entries are contiguous 32-bit (stride 4) or
			 * 64-bit (stride 8, +4 for the high dword)
			 */
			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     (test->reg + 4) + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			/* helpers return true on failure */
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return 0;
}
1582
1583 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1584 {
1585         struct ixgbe_hw *hw = &adapter->hw;
1586         if (hw->eeprom.ops.validate_checksum(hw, NULL))
1587                 *data = 1;
1588         else
1589                 *data = 0;
1590         return *data;
1591 }
1592
1593 static irqreturn_t ixgbe_test_intr(int irq, void *data)
1594 {
1595         struct net_device *netdev = (struct net_device *) data;
1596         struct ixgbe_adapter *adapter = netdev_priv(netdev);
1597
1598         adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1599
1600         return IRQ_HANDLED;
1601 }
1602
/* ixgbe_intr_test - ethtool offline interrupt self-test (legacy/MSI only)
 * @adapter: board private structure
 * @data: failure code output: 0 success, 1 request_irq failed,
 *	  3/5 an interrupt was posted while masked, 4 a forced
 *	  interrupt was not posted
 *
 * Installs ixgbe_test_intr on the legacy/MSI vector, then for each of
 * the low 10 causes masks/forces interrupts and checks what the
 * handler recorded in adapter->test_icr.  MSI-X setups are skipped
 * (reported as passing).  Returns the value left in @data, or -1 when
 * request_irq fails.
 */
static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		/* MSI gives us an exclusive, unshared vector */
		shared_int = false;
		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
				netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
				netdev->name, netdev)) {
		/* probe succeeded: nobody else is on this line */
		shared_int = false;
	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
			       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	e_info(hw, "testing %s interrupt\n", shared_int ?
	       "shared" : "unshared");

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = BIT(i);

		/* the negative checks below are only valid when the
		 * line is not shared with another device
		 */
		if (!shared_int) {
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
		adapter->test_icr = 0;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		IXGBE_WRITE_FLUSH(&adapter->hw);
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}
1715
1716 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1717 {
1718         struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1719         struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1720         struct ixgbe_hw *hw = &adapter->hw;
1721         u32 reg_ctl;
1722
1723         /* shut down the DMA engines now so they can be reinitialized later */
1724
1725         /* first Rx */
1726         hw->mac.ops.disable_rx(hw);
1727         ixgbe_disable_rx_queue(adapter, rx_ring);
1728
1729         /* now Tx */
1730         reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1731         reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1732         IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1733
1734         switch (hw->mac.type) {
1735         case ixgbe_mac_82599EB:
1736         case ixgbe_mac_X540:
1737         case ixgbe_mac_X550:
1738         case ixgbe_mac_X550EM_x:
1739         case ixgbe_mac_x550em_a:
1740                 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1741                 reg_ctl &= ~IXGBE_DMATXCTL_TE;
1742                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1743                 break;
1744         default:
1745                 break;
1746         }
1747
1748         ixgbe_reset(adapter);
1749
1750         ixgbe_free_tx_resources(&adapter->test_tx_ring);
1751         ixgbe_free_rx_resources(&adapter->test_rx_ring);
1752 }
1753
/* ixgbe_setup_desc_rings - allocate/configure rings for the loopback test
 * @adapter: board private structure
 *
 * Builds a single test Tx and Rx ring (adapter->test_tx_ring /
 * adapter->test_rx_ring), enables Tx DMA on MACs that gate it via
 * DMATXCTL, and re-enables the Rx unit with RXCTRL.DMBYPS set.
 * Returns 0 on success, 1 if Tx ring setup fails, 4 if Rx ring setup
 * fails (Tx resources are freed on that path).
 */
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rctl, reg_data;
	int ret_val;
	int err;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IXGBE_DEFAULT_TXD;
	tx_ring->queue_index = 0;
	tx_ring->dev = &adapter->pdev->dev;
	tx_ring->netdev = adapter->netdev;
	/* reuse the register index of the first regular Tx ring */
	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;

	err = ixgbe_setup_tx_resources(tx_ring);
	if (err)
		return 1;

	/* these MACs need the global Tx DMA enable bit set */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
		reg_data |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
		break;
	default:
		break;
	}

	ixgbe_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx Descriptor ring and Rx buffers */
	rx_ring->count = IXGBE_DEFAULT_RXD;
	rx_ring->queue_index = 0;
	rx_ring->dev = &adapter->pdev->dev;
	rx_ring->netdev = adapter->netdev;
	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;

	err = ixgbe_setup_rx_resources(adapter, rx_ring);
	if (err) {
		ret_val = 4;
		goto err_nomem;
	}

	/* Rx unit must be disabled while the ring is configured */
	hw->mac.ops.disable_rx(hw);

	ixgbe_configure_rx_ring(adapter, rx_ring);

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	rctl |= IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

	hw->mac.ops.enable_rx(hw);

	return 0;

err_nomem:
	ixgbe_free_desc_rings(adapter);
	return ret_val;
}
1819
/* ixgbe_setup_loopback_test - put the MAC into loopback mode
 * @adapter: board private structure
 *
 * Sets HLREG0.LPBK, opens the Rx filters (FCTRL BAM/SBP/MPE) and
 * forces link up: MACC.FLU on X540/X550 family, AUTOC.FLU otherwise.
 * On 82598, additionally powers down the Atlas Tx lanes (re-enabled
 * in the reset path).  Returns 0 on success, or 10 when no saved
 * orig_autoc is available to force link up.
 */
static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;


	/* Setup MAC loopback */
	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);

	/* accept broadcast, bad packets and all multicast so the looped
	 * test frames are not filtered out
	 */
	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);

	/* X540 and X550 needs to set the MACC.FLU bit to force link up */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg_data |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
		break;
	default:
		if (hw->mac.orig_autoc) {
			reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
		} else {
			/* no saved AUTOC value - cannot force link up */
			return 10;
		}
	}
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}
1879
1880 static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1881 {
1882         u32 reg_data;
1883
1884         reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1885         reg_data &= ~IXGBE_HLREG0_LPBK;
1886         IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1887 }
1888
1889 static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1890                                       unsigned int frame_size)
1891 {
1892         memset(skb->data, 0xFF, frame_size);
1893         frame_size >>= 1;
1894         memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
1895         memset(&skb->data[frame_size + 10], 0xBE, 1);
1896         memset(&skb->data[frame_size + 12], 0xAF, 1);
1897 }
1898
1899 static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
1900                                      unsigned int frame_size)
1901 {
1902         unsigned char *data;
1903         bool match = true;
1904
1905         frame_size >>= 1;
1906
1907         data = kmap(rx_buffer->page) + rx_buffer->page_offset;
1908
1909         if (data[3] != 0xFF ||
1910             data[frame_size + 10] != 0xBE ||
1911             data[frame_size + 12] != 0xAF)
1912                 match = false;
1913
1914         kunmap(rx_buffer->page);
1915
1916         return match;
1917 }
1918
/* ixgbe_clean_test_rings - reclaim test rings after a loopback burst
 * @rx_ring: Rx ring used by the loopback test
 * @tx_ring: Tx ring used by the loopback test
 * @size: expected test frame size
 *
 * Unmaps and frees all completed Tx buffers, verifies each received
 * buffer against the expected loopback pattern, re-posts the checked
 * Rx buffers to the ring, and returns the number of frames whose
 * contents matched.
 */
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
				  struct ixgbe_ring *tx_ring,
				  unsigned int size)
{
	union ixgbe_adv_rx_desc *rx_desc;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

	/* reclaim everything the test transmitted */
	while (tx_ntc != tx_ring->next_to_use) {
		union ixgbe_adv_tx_desc *tx_desc;
		struct ixgbe_tx_buffer *tx_buffer;

		tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);

		/* if DD is not set transmit has not completed */
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			return count;

		/* unmap buffer on Tx side */
		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		/* increment Tx next to clean counter */
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;
	}

	/* a non-zero written-back length marks a filled Rx descriptor */
	while (rx_desc->wb.upper.length) {
		struct ixgbe_rx_buffer *rx_buffer;

		/* check Rx buffer */
		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

		/* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer->dma,
					ixgbe_rx_bufsz(rx_ring),
					DMA_FROM_DEVICE);

		/* verify contents of skb */
		if (ixgbe_check_lbtest_frame(rx_buffer, size))
			count++;
		else
			break;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer->dma,
					   ixgbe_rx_bufsz(rx_ring),
					   DMA_FROM_DEVICE);

		/* increment Rx next to clean counter */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* re-map buffers to ring, store next to clean values */
	ixgbe_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}
2002
/* ixgbe_run_loopback_test - push frames through MAC loopback and verify
 * @adapter: board private structure
 *
 * Repeatedly transmits bursts of 64 clones of a 1024-byte test frame
 * on the test Tx ring and verifies they arrive intact on the test Rx
 * ring.  Returns 0 on success, 11 if the skb allocation fails, 12 if
 * a burst could not be fully transmitted, 13 if a burst was not fully
 * received intact.
 */
static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	int i, j, lc, good_cnt, ret_val = 0;
	unsigned int size = 1024;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;
	u32 flags_orig = adapter->flags;

	/* DCB can modify the frames on Tx */
	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	ixgbe_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */

	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) {
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue*/
		for (i = 0; i < 64; i++) {
			/* take an extra reference so the skb survives
			 * the transmit path and can be reused
			 */
			skb_get(skb);
			tx_ret_val = ixgbe_xmit_frame_ring(skb,
							   adapter,
							   tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	}

	/* free the original skb */
	kfree_skb(skb);
	/* restore flags (e.g. DCB) modified for the test */
	adapter->flags = flags_orig;

	return ret_val;
}
2071
2072 static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
2073 {
2074         *data = ixgbe_setup_desc_rings(adapter);
2075         if (*data)
2076                 goto out;
2077         *data = ixgbe_setup_loopback_test(adapter);
2078         if (*data)
2079                 goto err_loopback;
2080         *data = ixgbe_run_loopback_test(adapter);
2081         ixgbe_loopback_cleanup(adapter);
2082
2083 err_loopback:
2084         ixgbe_free_desc_rings(adapter);
2085 out:
2086         return *data;
2087 }
2088
2089 static void ixgbe_diag_test(struct net_device *netdev,
2090                             struct ethtool_test *eth_test, u64 *data)
2091 {
2092         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2093         bool if_running = netif_running(netdev);
2094
2095         if (ixgbe_removed(adapter->hw.hw_addr)) {
2096                 e_err(hw, "Adapter removed - test blocked\n");
2097                 data[0] = 1;
2098                 data[1] = 1;
2099                 data[2] = 1;
2100                 data[3] = 1;
2101                 data[4] = 1;
2102                 eth_test->flags |= ETH_TEST_FL_FAILED;
2103                 return;
2104         }
2105         set_bit(__IXGBE_TESTING, &adapter->state);
2106         if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
2107                 struct ixgbe_hw *hw = &adapter->hw;
2108
2109                 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2110                         int i;
2111                         for (i = 0; i < adapter->num_vfs; i++) {
2112                                 if (adapter->vfinfo[i].clear_to_send) {
2113                                         netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
2114                                         data[0] = 1;
2115                                         data[1] = 1;
2116                                         data[2] = 1;
2117                                         data[3] = 1;
2118                                         data[4] = 1;
2119                                         eth_test->flags |= ETH_TEST_FL_FAILED;
2120                                         clear_bit(__IXGBE_TESTING,
2121                                                   &adapter->state);
2122                                         goto skip_ol_tests;
2123                                 }
2124                         }
2125                 }
2126
2127                 /* Offline tests */
2128                 e_info(hw, "offline testing starting\n");
2129
2130                 /* Link test performed before hardware reset so autoneg doesn't
2131                  * interfere with test result
2132                  */
2133                 if (ixgbe_link_test(adapter, &data[4]))
2134                         eth_test->flags |= ETH_TEST_FL_FAILED;
2135
2136                 if (if_running)
2137                         /* indicate we're in test mode */
2138                         ixgbe_close(netdev);
2139                 else
2140                         ixgbe_reset(adapter);
2141
2142                 e_info(hw, "register testing starting\n");
2143                 if (ixgbe_reg_test(adapter, &data[0]))
2144                         eth_test->flags |= ETH_TEST_FL_FAILED;
2145
2146                 ixgbe_reset(adapter);
2147                 e_info(hw, "eeprom testing starting\n");
2148                 if (ixgbe_eeprom_test(adapter, &data[1]))
2149                         eth_test->flags |= ETH_TEST_FL_FAILED;
2150
2151                 ixgbe_reset(adapter);
2152                 e_info(hw, "interrupt testing starting\n");
2153                 if (ixgbe_intr_test(adapter, &data[2]))
2154                         eth_test->flags |= ETH_TEST_FL_FAILED;
2155
2156                 /* If SRIOV or VMDq is enabled then skip MAC
2157                  * loopback diagnostic. */
2158                 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
2159                                       IXGBE_FLAG_VMDQ_ENABLED)) {
2160                         e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
2161                         data[3] = 0;
2162                         goto skip_loopback;
2163                 }
2164
2165                 ixgbe_reset(adapter);
2166                 e_info(hw, "loopback testing starting\n");
2167                 if (ixgbe_loopback_test(adapter, &data[3]))
2168                         eth_test->flags |= ETH_TEST_FL_FAILED;
2169
2170 skip_loopback:
2171                 ixgbe_reset(adapter);
2172
2173                 /* clear testing bit and return adapter to previous state */
2174                 clear_bit(__IXGBE_TESTING, &adapter->state);
2175                 if (if_running)
2176                         ixgbe_open(netdev);
2177                 else if (hw->mac.ops.disable_tx_laser)
2178                         hw->mac.ops.disable_tx_laser(hw);
2179         } else {
2180                 e_info(hw, "online testing starting\n");
2181
2182                 /* Online tests */
2183                 if (ixgbe_link_test(adapter, &data[4]))
2184                         eth_test->flags |= ETH_TEST_FL_FAILED;
2185
2186                 /* Offline tests aren't run; pass by default */
2187                 data[0] = 0;
2188                 data[1] = 0;
2189                 data[2] = 0;
2190                 data[3] = 0;
2191
2192                 clear_bit(__IXGBE_TESTING, &adapter->state);
2193         }
2194
2195 skip_ol_tests:
2196         msleep_interruptible(4 * 1000);
2197 }
2198
2199 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
2200                                struct ethtool_wolinfo *wol)
2201 {
2202         struct ixgbe_hw *hw = &adapter->hw;
2203         int retval = 0;
2204
2205         /* WOL not supported for all devices */
2206         if (!ixgbe_wol_supported(adapter, hw->device_id,
2207                                  hw->subsystem_device_id)) {
2208                 retval = 1;
2209                 wol->supported = 0;
2210         }
2211
2212         return retval;
2213 }
2214
2215 static void ixgbe_get_wol(struct net_device *netdev,
2216                           struct ethtool_wolinfo *wol)
2217 {
2218         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2219
2220         wol->supported = WAKE_UCAST | WAKE_MCAST |
2221                          WAKE_BCAST | WAKE_MAGIC;
2222         wol->wolopts = 0;
2223
2224         if (ixgbe_wol_exclusion(adapter, wol) ||
2225             !device_can_wakeup(&adapter->pdev->dev))
2226                 return;
2227
2228         if (adapter->wol & IXGBE_WUFC_EX)
2229                 wol->wolopts |= WAKE_UCAST;
2230         if (adapter->wol & IXGBE_WUFC_MC)
2231                 wol->wolopts |= WAKE_MCAST;
2232         if (adapter->wol & IXGBE_WUFC_BC)
2233                 wol->wolopts |= WAKE_BCAST;
2234         if (adapter->wol & IXGBE_WUFC_MAG)
2235                 wol->wolopts |= WAKE_MAGIC;
2236 }
2237
2238 static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2239 {
2240         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2241
2242         if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
2243                 return -EOPNOTSUPP;
2244
2245         if (ixgbe_wol_exclusion(adapter, wol))
2246                 return wol->wolopts ? -EOPNOTSUPP : 0;
2247
2248         adapter->wol = 0;
2249
2250         if (wol->wolopts & WAKE_UCAST)
2251                 adapter->wol |= IXGBE_WUFC_EX;
2252         if (wol->wolopts & WAKE_MCAST)
2253                 adapter->wol |= IXGBE_WUFC_MC;
2254         if (wol->wolopts & WAKE_BCAST)
2255                 adapter->wol |= IXGBE_WUFC_BC;
2256         if (wol->wolopts & WAKE_MAGIC)
2257                 adapter->wol |= IXGBE_WUFC_MAG;
2258
2259         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
2260
2261         return 0;
2262 }
2263
/* ixgbe_nway_reset - restart the link (ethtool -r)
 * @netdev: interface to restart
 *
 * Implemented as a full reinit of the interface; a no-op when the
 * interface is down.  Always returns 0.
 */
static int ixgbe_nway_reset(struct net_device *netdev)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);

        if (!netif_running(netdev))
                return 0;

        ixgbe_reinit_locked(adapter);
        return 0;
}
2273
2274 static int ixgbe_set_phys_id(struct net_device *netdev,
2275                              enum ethtool_phys_id_state state)
2276 {
2277         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2278         struct ixgbe_hw *hw = &adapter->hw;
2279
2280         if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
2281                 return -EOPNOTSUPP;
2282
2283         switch (state) {
2284         case ETHTOOL_ID_ACTIVE:
2285                 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2286                 return 2;
2287
2288         case ETHTOOL_ID_ON:
2289                 hw->mac.ops.led_on(hw, hw->mac.led_link_act);
2290                 break;
2291
2292         case ETHTOOL_ID_OFF:
2293                 hw->mac.ops.led_off(hw, hw->mac.led_link_act);
2294                 break;
2295
2296         case ETHTOOL_ID_INACTIVE:
2297                 /* Restore LED settings */
2298                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
2299                 break;
2300         }
2301
2302         return 0;
2303 }
2304
2305 static int ixgbe_get_coalesce(struct net_device *netdev,
2306                               struct ethtool_coalesce *ec)
2307 {
2308         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2309
2310         /* only valid if in constant ITR mode */
2311         if (adapter->rx_itr_setting <= 1)
2312                 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2313         else
2314                 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2315
2316         /* if in mixed tx/rx queues per vector mode, report only rx settings */
2317         if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2318                 return 0;
2319
2320         /* only valid if in constant ITR mode */
2321         if (adapter->tx_itr_setting <= 1)
2322                 ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2323         else
2324                 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2325
2326         return 0;
2327 }
2328
2329 /*
2330  * this function must be called before setting the new value of
2331  * rx_itr_setting
2332  */
2333 static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2334 {
2335         struct net_device *netdev = adapter->netdev;
2336
2337         /* nothing to do if LRO or RSC are not enabled */
2338         if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
2339             !(netdev->features & NETIF_F_LRO))
2340                 return false;
2341
2342         /* check the feature flag value and enable RSC if necessary */
2343         if (adapter->rx_itr_setting == 1 ||
2344             adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
2345                 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2346                         adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2347                         e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
2348                         return true;
2349                 }
2350         /* if interrupt rate is too high then disable RSC */
2351         } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2352                 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2353                 e_info(probe, "rx-usecs set too low, disabling RSC\n");
2354                 return true;
2355         }
2356         return false;
2357 }
2358
/* ixgbe_set_coalesce - configure interrupt moderation (ethtool -C)
 * @netdev: interface being configured
 * @ec: requested coalescing parameters
 *
 * Converts the requested rx/tx usecs into the driver's internal ITR
 * representation (left-shifted by 2; 0 and 1 are special values kept
 * as-is), programs every queue vector's EITR, and resets the adapter
 * when the change crosses a TXDCTL.WTHRESH boundary or toggles RSC.
 * Returns 0 on success, -EINVAL for out-of-range or rejected requests.
 */
static int ixgbe_set_coalesce(struct net_device *netdev,
                              struct ethtool_coalesce *ec)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_q_vector *q_vector;
        int i;
        u16 tx_itr_param, rx_itr_param, tx_itr_prev;
        bool need_reset = false;

        if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
                /* reject Tx specific changes in case of mixed RxTx vectors */
                if (ec->tx_coalesce_usecs)
                        return -EINVAL;
                /* in mixed mode Tx follows Rx, so the "previous Tx" value
                 * for the WTHRESH check below is the Rx setting
                 */
                tx_itr_prev = adapter->rx_itr_setting;
        } else {
                tx_itr_prev = adapter->tx_itr_setting;
        }

        if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
            (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
                return -EINVAL;

        /* store usecs shifted left by 2; 0 (off) and 1 (dynamic) as-is */
        if (ec->rx_coalesce_usecs > 1)
                adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
        else
                adapter->rx_itr_setting = ec->rx_coalesce_usecs;

        if (adapter->rx_itr_setting == 1)
                rx_itr_param = IXGBE_20K_ITR;
        else
                rx_itr_param = adapter->rx_itr_setting;

        if (ec->tx_coalesce_usecs > 1)
                adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
        else
                adapter->tx_itr_setting = ec->tx_coalesce_usecs;

        if (adapter->tx_itr_setting == 1)
                tx_itr_param = IXGBE_12K_ITR;
        else
                tx_itr_param = adapter->tx_itr_setting;

        /* mixed Rx/Tx */
        if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
                adapter->tx_itr_setting = adapter->rx_itr_setting;

        /* detect ITR changes that require update of TXDCTL.WTHRESH */
        if ((adapter->tx_itr_setting != 1) &&
            (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
                if ((tx_itr_prev == 1) ||
                    (tx_itr_prev >= IXGBE_100K_ITR))
                        need_reset = true;
        } else {
                if ((tx_itr_prev != 1) &&
                    (tx_itr_prev < IXGBE_100K_ITR))
                        need_reset = true;
        }

        /* check the old value and enable RSC if necessary */
        need_reset |= ixgbe_update_rsc(adapter);

        /* push the new ITR value to every queue vector */
        for (i = 0; i < adapter->num_q_vectors; i++) {
                q_vector = adapter->q_vector[i];
                if (q_vector->tx.count && !q_vector->rx.count)
                        /* tx only */
                        q_vector->itr = tx_itr_param;
                else
                        /* rx only or mixed */
                        q_vector->itr = rx_itr_param;
                ixgbe_write_eitr(q_vector);
        }

        /*
         * do reset here at the end to make sure EITR==0 case is handled
         * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
         * also locks in RSC enable/disable which requires reset
         */
        if (need_reset)
                ixgbe_do_reset(netdev);

        return 0;
}
2441
/* ixgbe_get_ethtool_fdir_entry - report one flow director rule
 * @adapter: board private structure
 * @cmd: ethtool request; cmd->fs.location selects the rule by sw_idx
 *
 * Fills cmd->fs with the matched rule's flow spec, applying the single
 * per-port mask stored in adapter->fdir_mask.  Returns 0 if a rule with
 * the requested location exists, -EINVAL otherwise.
 */
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
                                        struct ethtool_rxnfc *cmd)
{
        union ixgbe_atr_input *mask = &adapter->fdir_mask;
        struct ethtool_rx_flow_spec *fsp =
                (struct ethtool_rx_flow_spec *)&cmd->fs;
        struct hlist_node *node2;
        struct ixgbe_fdir_filter *rule = NULL;

        /* report total rule count */
        cmd->data = (1024 << adapter->fdir_pballoc) - 2;

        /* list is kept sorted by sw_idx, so stop at the first rule at or
         * past the requested location
         */
        hlist_for_each_entry_safe(rule, node2,
                                  &adapter->fdir_filter_list, fdir_node) {
                if (fsp->location <= rule->sw_idx)
                        break;
        }

        if (!rule || fsp->location != rule->sw_idx)
                return -EINVAL;

        /* fill out the flow spec entry */

        /* set flow type field */
        switch (rule->filter.formatted.flow_type) {
        case IXGBE_ATR_FLOW_TYPE_TCPV4:
                fsp->flow_type = TCP_V4_FLOW;
                break;
        case IXGBE_ATR_FLOW_TYPE_UDPV4:
                fsp->flow_type = UDP_V4_FLOW;
                break;
        case IXGBE_ATR_FLOW_TYPE_SCTPV4:
                fsp->flow_type = SCTP_V4_FLOW;
                break;
        case IXGBE_ATR_FLOW_TYPE_IPV4:
                fsp->flow_type = IP_USER_FLOW;
                fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
                fsp->h_u.usr_ip4_spec.proto = 0;
                fsp->m_u.usr_ip4_spec.proto = 0;
                break;
        default:
                return -EINVAL;
        }

        /* copy the rule's match values (h_*) and the shared mask (m_*) */
        fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
        fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
        fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
        fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
        fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
        fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
        fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
        fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
        fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
        fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
        /* flex bytes are carried in the vlan_etype ext field */
        fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
        fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
        fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
        fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
        fsp->flow_type |= FLOW_EXT;

        /* record action */
        if (rule->action == IXGBE_FDIR_DROP_QUEUE)
                fsp->ring_cookie = RX_CLS_FLOW_DISC;
        else
                fsp->ring_cookie = rule->action;

        return 0;
}
2510
2511 static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2512                                       struct ethtool_rxnfc *cmd,
2513                                       u32 *rule_locs)
2514 {
2515         struct hlist_node *node2;
2516         struct ixgbe_fdir_filter *rule;
2517         int cnt = 0;
2518
2519         /* report total rule count */
2520         cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2521
2522         hlist_for_each_entry_safe(rule, node2,
2523                                   &adapter->fdir_filter_list, fdir_node) {
2524                 if (cnt == cmd->rule_cnt)
2525                         return -EMSGSIZE;
2526                 rule_locs[cnt] = rule->sw_idx;
2527                 cnt++;
2528         }
2529
2530         cmd->rule_cnt = cnt;
2531
2532         return 0;
2533 }
2534
/* ixgbe_get_rss_hash_opts - report RSS hash fields for a flow type
 * @adapter: board private structure
 * @cmd: ethtool request; flow_type in, data (RXH_* bits) out
 *
 * TCP always hashes on L4 ports; UDP only when the corresponding
 * IXGBE_FLAG2_RSS_FIELD_* flag is set; all listed types hash on the
 * IP src/dst.  The case fallthroughs below are intentional and build
 * the field set cumulatively.  Returns 0, or -EINVAL for unknown types.
 */
static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
                                   struct ethtool_rxnfc *cmd)
{
        cmd->data = 0;

        /* Report default options for RSS on ixgbe */
        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                /* fallthrough */
        case UDP_V4_FLOW:
                if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                /* fallthrough */
        case SCTP_V4_FLOW:
        case AH_ESP_V4_FLOW:
        case AH_V4_FLOW:
        case ESP_V4_FLOW:
        case IPV4_FLOW:
                cmd->data |= RXH_IP_SRC | RXH_IP_DST;
                break;
        case TCP_V6_FLOW:
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                /* fallthrough */
        case UDP_V6_FLOW:
                if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                /* fallthrough */
        case SCTP_V6_FLOW:
        case AH_ESP_V6_FLOW:
        case AH_V6_FLOW:
        case ESP_V6_FLOW:
        case IPV6_FLOW:
                cmd->data |= RXH_IP_SRC | RXH_IP_DST;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
2576
2577 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2578                            u32 *rule_locs)
2579 {
2580         struct ixgbe_adapter *adapter = netdev_priv(dev);
2581         int ret = -EOPNOTSUPP;
2582
2583         switch (cmd->cmd) {
2584         case ETHTOOL_GRXRINGS:
2585                 cmd->data = adapter->num_rx_queues;
2586                 ret = 0;
2587                 break;
2588         case ETHTOOL_GRXCLSRLCNT:
2589                 cmd->rule_cnt = adapter->fdir_filter_count;
2590                 ret = 0;
2591                 break;
2592         case ETHTOOL_GRXCLSRULE:
2593                 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
2594                 break;
2595         case ETHTOOL_GRXCLSRLALL:
2596                 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
2597                 break;
2598         case ETHTOOL_GRXFH:
2599                 ret = ixgbe_get_rss_hash_opts(adapter, cmd);
2600                 break;
2601         default:
2602                 break;
2603         }
2604
2605         return ret;
2606 }
2607
/* ixgbe_update_ethtool_fdir_entry - insert, replace or delete an fdir rule
 * @adapter: board private structure
 * @input: new filter to insert, or NULL to delete the rule at @sw_idx
 * @sw_idx: software index (ethtool "location") of the rule
 *
 * The filter list is kept sorted by sw_idx.  An existing rule at the
 * same index is removed first (and erased from hardware unless the new
 * rule hashes to the same bucket).  Callers in this file take
 * adapter->fdir_perfect_lock around this function.
 *
 * Returns 0 on insert; on delete (input == NULL) returns 0 if a rule
 * was found and removed, -EINVAL otherwise.
 */
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
                                    struct ixgbe_fdir_filter *input,
                                    u16 sw_idx)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct hlist_node *node2;
        struct ixgbe_fdir_filter *rule, *parent;
        int err = -EINVAL;

        parent = NULL;
        rule = NULL;

        /* walk to the insertion point; parent trails one node behind */
        hlist_for_each_entry_safe(rule, node2,
                                  &adapter->fdir_filter_list, fdir_node) {
                /* hash found, or no matching entry */
                if (rule->sw_idx >= sw_idx)
                        break;
                parent = rule;
        }

        /* if there is an old rule occupying our place remove it */
        if (rule && (rule->sw_idx == sw_idx)) {
                /* only erase from hardware when the replacement does not
                 * land in the same bucket (or this is a pure delete)
                 */
                if (!input || (rule->filter.formatted.bkt_hash !=
                               input->filter.formatted.bkt_hash)) {
                        err = ixgbe_fdir_erase_perfect_filter_82599(hw,
                                                                &rule->filter,
                                                                sw_idx);
                }

                hlist_del(&rule->fdir_node);
                kfree(rule);
                adapter->fdir_filter_count--;
        }

        /*
         * If no input this was a delete, err should be 0 if a rule was
         * successfully found and removed from the list else -EINVAL
         */
        if (!input)
                return err;

        /* initialize node and set software index */
        INIT_HLIST_NODE(&input->fdir_node);

        /* add filter to the list */
        if (parent)
                hlist_add_behind(&input->fdir_node, &parent->fdir_node);
        else
                hlist_add_head(&input->fdir_node,
                               &adapter->fdir_filter_list);

        /* update counts */
        adapter->fdir_filter_count++;

        return 0;
}
2664
2665 static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
2666                                        u8 *flow_type)
2667 {
2668         switch (fsp->flow_type & ~FLOW_EXT) {
2669         case TCP_V4_FLOW:
2670                 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2671                 break;
2672         case UDP_V4_FLOW:
2673                 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2674                 break;
2675         case SCTP_V4_FLOW:
2676                 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2677                 break;
2678         case IP_USER_FLOW:
2679                 switch (fsp->h_u.usr_ip4_spec.proto) {
2680                 case IPPROTO_TCP:
2681                         *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2682                         break;
2683                 case IPPROTO_UDP:
2684                         *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2685                         break;
2686                 case IPPROTO_SCTP:
2687                         *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2688                         break;
2689                 case 0:
2690                         if (!fsp->m_u.usr_ip4_spec.proto) {
2691                                 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2692                                 break;
2693                         }
2694                         /* fall through */
2695                 default:
2696                         return 0;
2697                 }
2698                 break;
2699         default:
2700                 return 0;
2701         }
2702
2703         return 1;
2704 }
2705
/* ixgbe_add_ethtool_fdir_entry - add a flow director rule (ethtool -N)
 * @adapter: board private structure
 * @cmd: ethtool request carrying the flow spec in cmd->fs
 *
 * Validates the target queue/VF and rule location, builds the formatted
 * filter and mask, programs the hardware (only a single mask is
 * supported per port; it is set from the first rule), and links the
 * rule into the sorted software list.  Returns 0 on success, -EOPNOTSUPP
 * when perfect filters are not enabled, -ENOMEM on allocation failure,
 * or -EINVAL for invalid input / hardware programming failure.
 */
static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
                                        struct ethtool_rxnfc *cmd)
{
        struct ethtool_rx_flow_spec *fsp =
                (struct ethtool_rx_flow_spec *)&cmd->fs;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_fdir_filter *input;
        union ixgbe_atr_input mask;
        u8 queue;
        int err;

        if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
                return -EOPNOTSUPP;

        /* ring_cookie is a masked into a set of queues and ixgbe pools or
         * we use the drop index.
         */
        if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
                queue = IXGBE_FDIR_DROP_QUEUE;
        } else {
                u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
                u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

                /* vf == 0 means the PF; ring is then a PF queue index */
                if (!vf && (ring >= adapter->num_rx_queues))
                        return -EINVAL;
                else if (vf &&
                         ((vf > adapter->num_vfs) ||
                           ring >= adapter->num_rx_queues_per_pool))
                        return -EINVAL;

                /* Map the ring onto the absolute queue index */
                if (!vf)
                        queue = adapter->rx_ring[ring]->reg_idx;
                else
                        queue = ((vf - 1) *
                                adapter->num_rx_queues_per_pool) + ring;
        }

        /* Don't allow indexes to exist outside of available space */
        if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
                e_err(drv, "Location out of range\n");
                return -EINVAL;
        }

        input = kzalloc(sizeof(*input), GFP_ATOMIC);
        if (!input)
                return -ENOMEM;

        memset(&mask, 0, sizeof(union ixgbe_atr_input));

        /* set SW index */
        input->sw_idx = fsp->location;

        /* record flow type */
        if (!ixgbe_flowspec_to_flow_type(fsp,
                                         &input->filter.formatted.flow_type)) {
                e_err(drv, "Unrecognized flow type\n");
                goto err_out;
        }

        mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
                                   IXGBE_ATR_L4TYPE_MASK;

        /* plain IPv4 rules do not match on the L4 type bits */
        if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
                mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

        /* Copy input into formatted structures */
        input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
        mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
        input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
        mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
        input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
        mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
        input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
        mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

        if (fsp->flow_type & FLOW_EXT) {
                input->filter.formatted.vm_pool =
                                (unsigned char)ntohl(fsp->h_ext.data[1]);
                mask.formatted.vm_pool =
                                (unsigned char)ntohl(fsp->m_ext.data[1]);
                input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
                mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
                /* flex bytes are carried in the vlan_etype ext field */
                input->filter.formatted.flex_bytes =
                                                fsp->h_ext.vlan_etype;
                mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
        }

        /* determine if we need to drop or route the packet */
        if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
                input->action = IXGBE_FDIR_DROP_QUEUE;
        else
                input->action = fsp->ring_cookie;

        spin_lock(&adapter->fdir_perfect_lock);

        if (hlist_empty(&adapter->fdir_filter_list)) {
                /* save mask and program input mask into HW */
                memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
                err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
                if (err) {
                        e_err(drv, "Error writing mask\n");
                        goto err_out_w_lock;
                }
        } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
                e_err(drv, "Only one mask supported per port\n");
                goto err_out_w_lock;
        }

        /* apply mask and compute/store hash */
        ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

        /* program filters to filter memory */
        err = ixgbe_fdir_write_perfect_filter_82599(hw,
                                &input->filter, input->sw_idx, queue);
        if (err)
                goto err_out_w_lock;

        /* ownership of input passes to the filter list on success */
        ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

        spin_unlock(&adapter->fdir_perfect_lock);

        return err;
err_out_w_lock:
        spin_unlock(&adapter->fdir_perfect_lock);
err_out:
        kfree(input);
        return -EINVAL;
}
2835
2836 static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2837                                         struct ethtool_rxnfc *cmd)
2838 {
2839         struct ethtool_rx_flow_spec *fsp =
2840                 (struct ethtool_rx_flow_spec *)&cmd->fs;
2841         int err;
2842
2843         spin_lock(&adapter->fdir_perfect_lock);
2844         err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
2845         spin_unlock(&adapter->fdir_perfect_lock);
2846
2847         return err;
2848 }
2849
2850 #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2851                        IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
/* ixgbe_set_rss_hash_opt - configure RSS hash fields for a flow type
 * @adapter: board private structure
 * @nfc: ethtool request; flow_type and RXH_* field bits in nfc->data
 *
 * Only src/dst IP and L4 port hashing are configurable, and only the
 * UDP L4 hashing is actually togglable; TCP must hash on everything
 * and the other types on IPs only.  When a flag changes, the MRQC (or
 * per-pool PFVFMRQC on X550+ with SR-IOV) register is rewritten.
 * Returns 0 on success, -EINVAL for unsupported combinations.
 */
static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
                                  struct ethtool_rxnfc *nfc)
{
        u32 flags2 = adapter->flags2;

        /*
         * RSS does not support anything other than hashing
         * to queues on src and dst IPs and ports
         */
        if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
                          RXH_L4_B_0_1 | RXH_L4_B_2_3))
                return -EINVAL;

        switch (nfc->flow_type) {
        case TCP_V4_FLOW:
        case TCP_V6_FLOW:
                /* TCP must hash on IPs and ports; nothing is optional */
                if (!(nfc->data & RXH_IP_SRC) ||
                    !(nfc->data & RXH_IP_DST) ||
                    !(nfc->data & RXH_L4_B_0_1) ||
                    !(nfc->data & RXH_L4_B_2_3))
                        return -EINVAL;
                break;
        case UDP_V4_FLOW:
                if (!(nfc->data & RXH_IP_SRC) ||
                    !(nfc->data & RXH_IP_DST))
                        return -EINVAL;
                /* UDP port hashing is all-or-nothing */
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
                        flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
                        flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
                        break;
                default:
                        return -EINVAL;
                }
                break;
        case UDP_V6_FLOW:
                if (!(nfc->data & RXH_IP_SRC) ||
                    !(nfc->data & RXH_IP_DST))
                        return -EINVAL;
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
                        flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
                        flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
                        break;
                default:
                        return -EINVAL;
                }
                break;
        case AH_ESP_V4_FLOW:
        case AH_V4_FLOW:
        case ESP_V4_FLOW:
        case SCTP_V4_FLOW:
        case AH_ESP_V6_FLOW:
        case AH_V6_FLOW:
        case ESP_V6_FLOW:
        case SCTP_V6_FLOW:
                /* these types hash on IPs only; port bits are rejected */
                if (!(nfc->data & RXH_IP_SRC) ||
                    !(nfc->data & RXH_IP_DST) ||
                    (nfc->data & RXH_L4_B_0_1) ||
                    (nfc->data & RXH_L4_B_2_3))
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        /* if we changed something we need to update flags */
        if (flags2 != adapter->flags2) {
                struct ixgbe_hw *hw = &adapter->hw;
                u32 mrqc;
                unsigned int pf_pool = adapter->num_vfs;

                /* X550+ with SR-IOV keeps the PF's RSS config per pool */
                if ((hw->mac.type >= ixgbe_mac_X550) &&
                    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
                        mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
                else
                        mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);

                if ((flags2 & UDP_RSS_FLAGS) &&
                    !(adapter->flags2 & UDP_RSS_FLAGS))
                        e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

                adapter->flags2 = flags2;

                /* Perform hash on these packet types */
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
                      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
                      | IXGBE_MRQC_RSS_FIELD_IPV6
                      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

                /* clear both UDP bits, then set them from the new flags */
                mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
                          IXGBE_MRQC_RSS_FIELD_IPV6_UDP);

                if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
                        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;

                if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
                        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

                if ((hw->mac.type >= ixgbe_mac_X550) &&
                    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
                        IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
                else
                        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
        }

        return 0;
}
2964
2965 static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2966 {
2967         struct ixgbe_adapter *adapter = netdev_priv(dev);
2968         int ret = -EOPNOTSUPP;
2969
2970         switch (cmd->cmd) {
2971         case ETHTOOL_SRXCLSRLINS:
2972                 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
2973                 break;
2974         case ETHTOOL_SRXCLSRLDEL:
2975                 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
2976                 break;
2977         case ETHTOOL_SRXFH:
2978                 ret = ixgbe_set_rss_hash_opt(adapter, cmd);
2979                 break;
2980         default:
2981                 break;
2982         }
2983
2984         return ret;
2985 }
2986
2987 static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
2988 {
2989         if (adapter->hw.mac.type < ixgbe_mac_X550)
2990                 return 16;
2991         else
2992                 return 64;
2993 }
2994
/* Report the size in bytes of the RSS hash key to ethtool. */
static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
	return IXGBE_RSS_KEY_SIZE;
}
2999
3000 static u32 ixgbe_rss_indir_size(struct net_device *netdev)
3001 {
3002         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3003
3004         return ixgbe_rss_indir_tbl_entries(adapter);
3005 }
3006
3007 static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
3008 {
3009         int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
3010         u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
3011
3012         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3013                 rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
3014
3015         for (i = 0; i < reta_size; i++)
3016                 indir[i] = adapter->rss_indir_tbl[i] & rss_m;
3017 }
3018
3019 static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
3020                           u8 *hfunc)
3021 {
3022         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3023
3024         if (hfunc)
3025                 *hfunc = ETH_RSS_HASH_TOP;
3026
3027         if (indir)
3028                 ixgbe_get_reta(adapter, indir);
3029
3030         if (key)
3031                 memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));
3032
3033         return 0;
3034 }
3035
3036 static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
3037                           const u8 *key, const u8 hfunc)
3038 {
3039         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3040         int i;
3041         u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3042
3043         if (hfunc)
3044                 return -EINVAL;
3045
3046         /* Fill out the redirection table */
3047         if (indir) {
3048                 int max_queues = min_t(int, adapter->num_rx_queues,
3049                                        ixgbe_rss_indir_tbl_max(adapter));
3050
3051                 /*Allow at least 2 queues w/ SR-IOV.*/
3052                 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3053                     (max_queues < 2))
3054                         max_queues = 2;
3055
3056                 /* Verify user input. */
3057                 for (i = 0; i < reta_entries; i++)
3058                         if (indir[i] >= max_queues)
3059                                 return -EINVAL;
3060
3061                 for (i = 0; i < reta_entries; i++)
3062                         adapter->rss_indir_tbl[i] = indir[i];
3063
3064                 ixgbe_store_reta(adapter);
3065         }
3066
3067         /* Fill out the rss hash key */
3068         if (key) {
3069                 memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
3070                 ixgbe_store_key(adapter);
3071         }
3072
3073         return 0;
3074 }
3075
3076 static int ixgbe_get_ts_info(struct net_device *dev,
3077                              struct ethtool_ts_info *info)
3078 {
3079         struct ixgbe_adapter *adapter = netdev_priv(dev);
3080
3081         /* we always support timestamping disabled */
3082         info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
3083
3084         switch (adapter->hw.mac.type) {
3085         case ixgbe_mac_X550:
3086         case ixgbe_mac_X550EM_x:
3087         case ixgbe_mac_x550em_a:
3088                 info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
3089                 break;
3090         case ixgbe_mac_X540:
3091         case ixgbe_mac_82599EB:
3092                 info->rx_filters |=
3093                         BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
3094                         BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
3095                         BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
3096                 break;
3097         default:
3098                 return ethtool_op_get_ts_info(dev, info);
3099         }
3100
3101         info->so_timestamping =
3102                 SOF_TIMESTAMPING_TX_SOFTWARE |
3103                 SOF_TIMESTAMPING_RX_SOFTWARE |
3104                 SOF_TIMESTAMPING_SOFTWARE |
3105                 SOF_TIMESTAMPING_TX_HARDWARE |
3106                 SOF_TIMESTAMPING_RX_HARDWARE |
3107                 SOF_TIMESTAMPING_RAW_HARDWARE;
3108
3109         if (adapter->ptp_clock)
3110                 info->phc_index = ptp_clock_index(adapter->ptp_clock);
3111         else
3112                 info->phc_index = -1;
3113
3114         info->tx_types =
3115                 BIT(HWTSTAMP_TX_OFF) |
3116                 BIT(HWTSTAMP_TX_ON);
3117
3118         return 0;
3119 }
3120
3121 static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
3122 {
3123         unsigned int max_combined;
3124         u8 tcs = adapter->hw_tcs;
3125
3126         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3127                 /* We only support one q_vector without MSI-X */
3128                 max_combined = 1;
3129         } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3130                 /* Limit value based on the queue mask */
3131                 max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
3132         } else if (tcs > 1) {
3133                 /* For DCB report channels per traffic class */
3134                 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3135                         /* 8 TC w/ 4 queues per TC */
3136                         max_combined = 4;
3137                 } else if (tcs > 4) {
3138                         /* 8 TC w/ 8 queues per TC */
3139                         max_combined = 8;
3140                 } else {
3141                         /* 4 TC w/ 16 queues per TC */
3142                         max_combined = 16;
3143                 }
3144         } else if (adapter->atr_sample_rate) {
3145                 /* support up to 64 queues with ATR */
3146                 max_combined = IXGBE_MAX_FDIR_INDICES;
3147         } else {
3148                 /* support up to 16 queues with RSS */
3149                 max_combined = ixgbe_max_rss_indices(adapter);
3150         }
3151
3152         return max_combined;
3153 }
3154
3155 static void ixgbe_get_channels(struct net_device *dev,
3156                                struct ethtool_channels *ch)
3157 {
3158         struct ixgbe_adapter *adapter = netdev_priv(dev);
3159
3160         /* report maximum channels */
3161         ch->max_combined = ixgbe_max_channels(adapter);
3162
3163         /* report info for other vector */
3164         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3165                 ch->max_other = NON_Q_VECTORS;
3166                 ch->other_count = NON_Q_VECTORS;
3167         }
3168
3169         /* record RSS queues */
3170         ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
3171
3172         /* nothing else to report if RSS is disabled */
3173         if (ch->combined_count == 1)
3174                 return;
3175
3176         /* we do not support ATR queueing if SR-IOV is enabled */
3177         if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3178                 return;
3179
3180         /* same thing goes for being DCB enabled */
3181         if (adapter->hw_tcs > 1)
3182                 return;
3183
3184         /* if ATR is disabled we can exit */
3185         if (!adapter->atr_sample_rate)
3186                 return;
3187
3188         /* report flow director queues as maximum channels */
3189         ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
3190 }
3191
3192 static int ixgbe_set_channels(struct net_device *dev,
3193                               struct ethtool_channels *ch)
3194 {
3195         struct ixgbe_adapter *adapter = netdev_priv(dev);
3196         unsigned int count = ch->combined_count;
3197         u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
3198
3199         /* verify they are not requesting separate vectors */
3200         if (!count || ch->rx_count || ch->tx_count)
3201                 return -EINVAL;
3202
3203         /* verify other_count has not changed */
3204         if (ch->other_count != NON_Q_VECTORS)
3205                 return -EINVAL;
3206
3207         /* verify the number of channels does not exceed hardware limits */
3208         if (count > ixgbe_max_channels(adapter))
3209                 return -EINVAL;
3210
3211         /* update feature limits from largest to smallest supported values */
3212         adapter->ring_feature[RING_F_FDIR].limit = count;
3213
3214         /* cap RSS limit */
3215         if (count > max_rss_indices)
3216                 count = max_rss_indices;
3217         adapter->ring_feature[RING_F_RSS].limit = count;
3218
3219 #ifdef IXGBE_FCOE
3220         /* cap FCoE limit at 8 */
3221         if (count > IXGBE_FCRETA_SIZE)
3222                 count = IXGBE_FCRETA_SIZE;
3223         adapter->ring_feature[RING_F_FCOE].limit = count;
3224
3225 #endif
3226         /* use setup TC to update any traffic class queue mapping */
3227         return ixgbe_setup_tc(dev, adapter->hw_tcs);
3228 }
3229
3230 static int ixgbe_get_module_info(struct net_device *dev,
3231                                        struct ethtool_modinfo *modinfo)
3232 {
3233         struct ixgbe_adapter *adapter = netdev_priv(dev);
3234         struct ixgbe_hw *hw = &adapter->hw;
3235         s32 status;
3236         u8 sff8472_rev, addr_mode;
3237         bool page_swap = false;
3238
3239         if (hw->phy.type == ixgbe_phy_fw)
3240                 return -ENXIO;
3241
3242         /* Check whether we support SFF-8472 or not */
3243         status = hw->phy.ops.read_i2c_eeprom(hw,
3244                                              IXGBE_SFF_SFF_8472_COMP,
3245                                              &sff8472_rev);
3246         if (status)
3247                 return -EIO;
3248
3249         /* addressing mode is not supported */
3250         status = hw->phy.ops.read_i2c_eeprom(hw,
3251                                              IXGBE_SFF_SFF_8472_SWAP,
3252                                              &addr_mode);
3253         if (status)
3254                 return -EIO;
3255
3256         if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
3257                 e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
3258                 page_swap = true;
3259         }
3260
3261         if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
3262                 /* We have a SFP, but it does not support SFF-8472 */
3263                 modinfo->type = ETH_MODULE_SFF_8079;
3264                 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
3265         } else {
3266                 /* We have a SFP which supports a revision of SFF-8472. */
3267                 modinfo->type = ETH_MODULE_SFF_8472;
3268                 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
3269         }
3270
3271         return 0;
3272 }
3273
3274 static int ixgbe_get_module_eeprom(struct net_device *dev,
3275                                          struct ethtool_eeprom *ee,
3276                                          u8 *data)
3277 {
3278         struct ixgbe_adapter *adapter = netdev_priv(dev);
3279         struct ixgbe_hw *hw = &adapter->hw;
3280         s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
3281         u8 databyte = 0xFF;
3282         int i = 0;
3283
3284         if (ee->len == 0)
3285                 return -EINVAL;
3286
3287         if (hw->phy.type == ixgbe_phy_fw)
3288                 return -ENXIO;
3289
3290         for (i = ee->offset; i < ee->offset + ee->len; i++) {
3291                 /* I2C reads can take long time */
3292                 if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
3293                         return -EBUSY;
3294
3295                 if (i < ETH_MODULE_SFF_8079_LEN)
3296                         status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
3297                 else
3298                         status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
3299
3300                 if (status)
3301                         return -EIO;
3302
3303                 data[i - ee->offset] = databyte;
3304         }
3305
3306         return 0;
3307 }
3308
/* Map of ixgbe MAC link-speed bits to ethtool SUPPORTED_* capability bits;
 * used to translate PHY EEE speed masks for ethtool reporting.
 */
static const struct {
	ixgbe_link_speed mac_speed;
	u32 supported;
} ixgbe_ls_map[] = {
	{ IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
	{ IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
	{ IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
	{ IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
	{ IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
};
3319
/* Map of firmware EEE link-partner ability bits (FW_PHY_ACT_UD_2 result)
 * to ethtool SUPPORTED_* bits; used by ixgbe_get_eee_fw().
 */
static const struct {
	u32 lp_advertised;
	u32 mac_speed;
} ixgbe_lp_map[] = {
	{ FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
	{ FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
	{ FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
	{ FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
	{ FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
	{ FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
};
3331
3332 static int
3333 ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
3334 {
3335         u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
3336         struct ixgbe_hw *hw = &adapter->hw;
3337         s32 rc;
3338         u16 i;
3339
3340         rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
3341         if (rc)
3342                 return rc;
3343
3344         edata->lp_advertised = 0;
3345         for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
3346                 if (info[0] & ixgbe_lp_map[i].lp_advertised)
3347                         edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
3348         }
3349
3350         edata->supported = 0;
3351         for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3352                 if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
3353                         edata->supported |= ixgbe_ls_map[i].supported;
3354         }
3355
3356         edata->advertised = 0;
3357         for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3358                 if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
3359                         edata->advertised |= ixgbe_ls_map[i].supported;
3360         }
3361
3362         edata->eee_enabled = !!edata->advertised;
3363         edata->tx_lpi_enabled = edata->eee_enabled;
3364         if (edata->advertised & edata->lp_advertised)
3365                 edata->eee_active = true;
3366
3367         return 0;
3368 }
3369
3370 static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
3371 {
3372         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3373         struct ixgbe_hw *hw = &adapter->hw;
3374
3375         if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3376                 return -EOPNOTSUPP;
3377
3378         if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
3379                 return ixgbe_get_eee_fw(adapter, edata);
3380
3381         return -EOPNOTSUPP;
3382 }
3383
3384 static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
3385 {
3386         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3387         struct ixgbe_hw *hw = &adapter->hw;
3388         struct ethtool_eee eee_data;
3389         s32 ret_val;
3390
3391         if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3392                 return -EOPNOTSUPP;
3393
3394         memset(&eee_data, 0, sizeof(struct ethtool_eee));
3395
3396         ret_val = ixgbe_get_eee(netdev, &eee_data);
3397         if (ret_val)
3398                 return ret_val;
3399
3400         if (eee_data.eee_enabled && !edata->eee_enabled) {
3401                 if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
3402                         e_err(drv, "Setting EEE tx-lpi is not supported\n");
3403                         return -EINVAL;
3404                 }
3405
3406                 if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
3407                         e_err(drv,
3408                               "Setting EEE Tx LPI timer is not supported\n");
3409                         return -EINVAL;
3410                 }
3411
3412                 if (eee_data.advertised != edata->advertised) {
3413                         e_err(drv,
3414                               "Setting EEE advertised speeds is not supported\n");
3415                         return -EINVAL;
3416                 }
3417         }
3418
3419         if (eee_data.eee_enabled != edata->eee_enabled) {
3420                 if (edata->eee_enabled) {
3421                         adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
3422                         hw->phy.eee_speeds_advertised =
3423                                                    hw->phy.eee_speeds_supported;
3424                 } else {
3425                         adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
3426                         hw->phy.eee_speeds_advertised = 0;
3427                 }
3428
3429                 /* reset link */
3430                 if (netif_running(netdev))
3431                         ixgbe_reinit_locked(adapter);
3432                 else
3433                         ixgbe_reset(adapter);
3434         }
3435
3436         return 0;
3437 }
3438
3439 static u32 ixgbe_get_priv_flags(struct net_device *netdev)
3440 {
3441         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3442         u32 priv_flags = 0;
3443
3444         if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
3445                 priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;
3446
3447         return priv_flags;
3448 }
3449
3450 static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
3451 {
3452         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3453         unsigned int flags2 = adapter->flags2;
3454
3455         flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
3456         if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
3457                 flags2 |= IXGBE_FLAG2_RX_LEGACY;
3458
3459         if (flags2 != adapter->flags2) {
3460                 adapter->flags2 = flags2;
3461
3462                 /* reset interface to repopulate queues */
3463                 if (netif_running(netdev))
3464                         ixgbe_reinit_locked(adapter);
3465         }
3466
3467         return 0;
3468 }
3469
/* ethtool operations table for ixgbe PF devices; installed on the
 * netdev by ixgbe_set_ethtool_ops().
 */
static const struct ethtool_ops ixgbe_ethtool_ops = {
	.get_drvinfo            = ixgbe_get_drvinfo,
	.get_regs_len           = ixgbe_get_regs_len,
	.get_regs               = ixgbe_get_regs,
	.get_wol                = ixgbe_get_wol,
	.set_wol                = ixgbe_set_wol,
	.nway_reset             = ixgbe_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = ixgbe_get_eeprom_len,
	.get_eeprom             = ixgbe_get_eeprom,
	.set_eeprom             = ixgbe_set_eeprom,
	.get_ringparam          = ixgbe_get_ringparam,
	.set_ringparam          = ixgbe_set_ringparam,
	.get_pauseparam         = ixgbe_get_pauseparam,
	.set_pauseparam         = ixgbe_set_pauseparam,
	.get_msglevel           = ixgbe_get_msglevel,
	.set_msglevel           = ixgbe_set_msglevel,
	.self_test              = ixgbe_diag_test,
	.get_strings            = ixgbe_get_strings,
	.set_phys_id            = ixgbe_set_phys_id,
	.get_sset_count         = ixgbe_get_sset_count,
	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
	.get_coalesce           = ixgbe_get_coalesce,
	.set_coalesce           = ixgbe_set_coalesce,
	.get_rxnfc              = ixgbe_get_rxnfc,
	.set_rxnfc              = ixgbe_set_rxnfc,
	.get_rxfh_indir_size    = ixgbe_rss_indir_size,
	.get_rxfh_key_size      = ixgbe_get_rxfh_key_size,
	.get_rxfh               = ixgbe_get_rxfh,
	.set_rxfh               = ixgbe_set_rxfh,
	.get_eee                = ixgbe_get_eee,
	.set_eee                = ixgbe_set_eee,
	.get_channels           = ixgbe_get_channels,
	.set_channels           = ixgbe_set_channels,
	.get_priv_flags         = ixgbe_get_priv_flags,
	.set_priv_flags         = ixgbe_set_priv_flags,
	.get_ts_info            = ixgbe_get_ts_info,
	.get_module_info        = ixgbe_get_module_info,
	.get_module_eeprom      = ixgbe_get_module_eeprom,
	.get_link_ksettings     = ixgbe_get_link_ksettings,
	.set_link_ksettings     = ixgbe_set_link_ksettings,
};
3512
/* Attach the ixgbe ethtool operations table to @netdev. */
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbe_ethtool_ops;
}