/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
29 /* ethtool support for ixgbe */
31 #include <linux/interrupt.h>
32 #include <linux/types.h>
33 #include <linux/module.h>
34 #include <linux/slab.h>
35 #include <linux/pci.h>
36 #include <linux/netdevice.h>
37 #include <linux/ethtool.h>
38 #include <linux/vmalloc.h>
39 #include <linux/highmem.h>
40 #include <linux/uaccess.h>
#include "ixgbe.h"
#include "ixgbe_phy.h"
46 #define IXGBE_ALL_RAR_ENTRIES 16
enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
        char stat_string[ETH_GSTRING_LEN];
        int type;
        int sizeof_stat;
        int stat_offset;
};
57 #define IXGBE_STAT(m) IXGBE_STATS, \
58 sizeof(((struct ixgbe_adapter *)0)->m), \
59 offsetof(struct ixgbe_adapter, m)
60 #define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
61 sizeof(((struct rtnl_link_stats64 *)0)->m), \
62 offsetof(struct rtnl_link_stats64, m)
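
/* Each stats table entry records whether the counter lives in the netdev
 * rtnl_link_stats64 block or inside struct ixgbe_adapter, plus its size and
 * byte offset, so ixgbe_get_ethtool_stats() can copy every value generically.
 */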
64 static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
65 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
66 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
67 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
68 {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
69 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
70 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
71 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
72 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
73 {"lsc_int", IXGBE_STAT(lsc_int)},
74 {"tx_busy", IXGBE_STAT(tx_busy)},
75 {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
76 {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
77 {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
78 {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
79 {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
80 {"multicast", IXGBE_NETDEV_STAT(multicast)},
81 {"broadcast", IXGBE_STAT(stats.bprc)},
82 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
83 {"collisions", IXGBE_NETDEV_STAT(collisions)},
84 {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
85 {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
86 {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
87 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
88 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
89 {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
90 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
91 {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
92 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
93 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
94 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
95 {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
96 {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
97 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
98 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
99 {"tx_restart_queue", IXGBE_STAT(restart_queue)},
100 {"rx_length_errors", IXGBE_STAT(stats.rlec)},
101 {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
102 {"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
103 {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
104 {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
105 {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
106 {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
107 {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
108 {"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
109 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
110 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
111 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
112 {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
113 {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
114 {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
115 {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
116 {"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
117 {"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
118 {"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
119 {"tx_ipsec", IXGBE_STAT(tx_ipsec)},
120 {"rx_ipsec", IXGBE_STAT(rx_ipsec)},
#ifdef IXGBE_FCOE
        {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
123 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
124 {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
125 {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
126 {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
127 {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
128 {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
129 {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};

/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
140 #define IXGBE_QUEUE_STATS_LEN ( \
141 (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
142 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
143 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
144 #define IXGBE_PB_STATS_LEN ( \
145 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
146 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
147 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
                         sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
                        / sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
151 IXGBE_PB_STATS_LEN + \
152 IXGBE_QUEUE_STATS_LEN)
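
/* IXGBE_STATS_LEN is the total number of u64 counters reported by ethtool -S:
 * the global table above, a packets/bytes pair for every Tx and Rx queue, and
 * per-packet-buffer XON/XOFF counts for both directions.
 */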
154 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
155 "Register test (offline)", "Eeprom test (offline)",
156 "Interrupt test (offline)", "Loopback test (offline)",
        "Link test (on/offline)"
};

#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
161 static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBE_PRIV_FLAGS_LEGACY_RX	BIT(0)
        "legacy-rx",
};

#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
168 /* currently supported speeds for 10G */
169 #define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
170 SUPPORTED_10000baseKX4_Full | \
171 SUPPORTED_10000baseKR_Full)
173 #define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
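
/* Map a backplane device ID to the 10G link mode(s) ethtool should report;
 * non-backplane media simply report 10GbaseT.
 */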
static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
{
        if (!ixgbe_isbackplane(hw->phy.media_type))
178 return SUPPORTED_10000baseT_Full;
180 switch (hw->device_id) {
181 case IXGBE_DEV_ID_82598:
182 case IXGBE_DEV_ID_82599_KX4:
183 case IXGBE_DEV_ID_82599_KX4_MEZZ:
184 case IXGBE_DEV_ID_X550EM_X_KX4:
185 return SUPPORTED_10000baseKX4_Full;
186 case IXGBE_DEV_ID_82598_BX:
187 case IXGBE_DEV_ID_82599_KR:
188 case IXGBE_DEV_ID_X550EM_X_KR:
189 case IXGBE_DEV_ID_X550EM_X_XFI:
190 return SUPPORTED_10000baseKR_Full;
        default:
                return SUPPORTED_10000baseKX4_Full |
                       SUPPORTED_10000baseKR_Full;
        }
}
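
/* Fill in the ethtool link settings: supported and advertised modes, the port
 * type derived from the PHY/SFP type, and the current speed/duplex when link
 * is up.
 */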
197 static int ixgbe_get_link_ksettings(struct net_device *netdev,
                                    struct ethtool_link_ksettings *cmd)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
201 struct ixgbe_hw *hw = &adapter->hw;
202 ixgbe_link_speed supported_link;
203 bool autoneg = false;
204 u32 supported, advertising;
206 ethtool_convert_link_mode_to_legacy_u32(&supported,
207 cmd->link_modes.supported);
209 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
211 /* set the supported link speeds */
212 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
213 supported |= ixgbe_get_supported_10gtypes(hw);
214 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
215 supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
216 SUPPORTED_1000baseKX_Full :
217 SUPPORTED_1000baseT_Full;
218 if (supported_link & IXGBE_LINK_SPEED_100_FULL)
219 supported |= SUPPORTED_100baseT_Full;
220 if (supported_link & IXGBE_LINK_SPEED_10_FULL)
221 supported |= SUPPORTED_10baseT_Full;
223 /* default advertised speed if phy.autoneg_advertised isn't set */
224 advertising = supported;
225 /* set the advertised speeds */
226 if (hw->phy.autoneg_advertised) {
228 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
229 advertising |= ADVERTISED_10baseT_Full;
230 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
231 advertising |= ADVERTISED_100baseT_Full;
232 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
233 advertising |= supported & ADVRTSD_MSK_10G;
234 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
235 if (supported & SUPPORTED_1000baseKX_Full)
                                advertising |= ADVERTISED_1000baseKX_Full;
                        else
                                advertising |= ADVERTISED_1000baseT_Full;
                }
        } else {
                if (hw->phy.multispeed_fiber && !autoneg) {
242 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
                                advertising = ADVERTISED_10000baseT_Full;
                }
        }

        if (autoneg) {
                supported |= SUPPORTED_Autoneg;
                advertising |= ADVERTISED_Autoneg;
                cmd->base.autoneg = AUTONEG_ENABLE;
        } else
                cmd->base.autoneg = AUTONEG_DISABLE;
254 /* Determine the remaining settings based on the PHY type. */
255 switch (adapter->hw.phy.type) {
258 case ixgbe_phy_x550em_ext_t:
260 case ixgbe_phy_cu_unknown:
261 supported |= SUPPORTED_TP;
262 advertising |= ADVERTISED_TP;
263 cmd->base.port = PORT_TP;
266 supported |= SUPPORTED_FIBRE;
267 advertising |= ADVERTISED_FIBRE;
268 cmd->base.port = PORT_FIBRE;
271 case ixgbe_phy_sfp_passive_tyco:
272 case ixgbe_phy_sfp_passive_unknown:
273 case ixgbe_phy_sfp_ftl:
274 case ixgbe_phy_sfp_avago:
275 case ixgbe_phy_sfp_intel:
276 case ixgbe_phy_sfp_unknown:
277 case ixgbe_phy_qsfp_passive_unknown:
278 case ixgbe_phy_qsfp_active_unknown:
279 case ixgbe_phy_qsfp_intel:
280 case ixgbe_phy_qsfp_unknown:
281 /* SFP+ devices, further checking needed */
282 switch (adapter->hw.phy.sfp_type) {
283 case ixgbe_sfp_type_da_cu:
284 case ixgbe_sfp_type_da_cu_core0:
285 case ixgbe_sfp_type_da_cu_core1:
286 supported |= SUPPORTED_FIBRE;
287 advertising |= ADVERTISED_FIBRE;
288 cmd->base.port = PORT_DA;
290 case ixgbe_sfp_type_sr:
291 case ixgbe_sfp_type_lr:
292 case ixgbe_sfp_type_srlr_core0:
293 case ixgbe_sfp_type_srlr_core1:
294 case ixgbe_sfp_type_1g_sx_core0:
295 case ixgbe_sfp_type_1g_sx_core1:
296 case ixgbe_sfp_type_1g_lx_core0:
297 case ixgbe_sfp_type_1g_lx_core1:
298 supported |= SUPPORTED_FIBRE;
299 advertising |= ADVERTISED_FIBRE;
300 cmd->base.port = PORT_FIBRE;
302 case ixgbe_sfp_type_not_present:
303 supported |= SUPPORTED_FIBRE;
304 advertising |= ADVERTISED_FIBRE;
305 cmd->base.port = PORT_NONE;
307 case ixgbe_sfp_type_1g_cu_core0:
308 case ixgbe_sfp_type_1g_cu_core1:
309 supported |= SUPPORTED_TP;
310 advertising |= ADVERTISED_TP;
311 cmd->base.port = PORT_TP;
313 case ixgbe_sfp_type_unknown:
315 supported |= SUPPORTED_FIBRE;
316 advertising |= ADVERTISED_FIBRE;
317 cmd->base.port = PORT_OTHER;
322 supported |= SUPPORTED_FIBRE;
323 advertising |= ADVERTISED_FIBRE;
324 cmd->base.port = PORT_NONE;
326 case ixgbe_phy_unknown:
327 case ixgbe_phy_generic:
328 case ixgbe_phy_sfp_unsupported:
330 supported |= SUPPORTED_FIBRE;
331 advertising |= ADVERTISED_FIBRE;
332 cmd->base.port = PORT_OTHER;
336 /* Indicate pause support */
337 supported |= SUPPORTED_Pause;
        switch (hw->fc.requested_mode) {
        case ixgbe_fc_full:
                advertising |= ADVERTISED_Pause;
                break;
        case ixgbe_fc_rx_pause:
                advertising |= ADVERTISED_Pause |
                               ADVERTISED_Asym_Pause;
                break;
        case ixgbe_fc_tx_pause:
                advertising |= ADVERTISED_Asym_Pause;
                break;
        default:
                advertising &= ~(ADVERTISED_Pause |
                                 ADVERTISED_Asym_Pause);
        }
        if (netif_carrier_ok(netdev)) {
                switch (adapter->link_speed) {
                case IXGBE_LINK_SPEED_10GB_FULL:
                        cmd->base.speed = SPEED_10000;
                        break;
                case IXGBE_LINK_SPEED_5GB_FULL:
                        cmd->base.speed = SPEED_5000;
                        break;
                case IXGBE_LINK_SPEED_2_5GB_FULL:
                        cmd->base.speed = SPEED_2500;
                        break;
                case IXGBE_LINK_SPEED_1GB_FULL:
                        cmd->base.speed = SPEED_1000;
                        break;
                case IXGBE_LINK_SPEED_100_FULL:
                        cmd->base.speed = SPEED_100;
                        break;
                case IXGBE_LINK_SPEED_10_FULL:
                        cmd->base.speed = SPEED_10;
                        break;
                default:
                        break;
                }
                cmd->base.duplex = DUPLEX_FULL;
        } else {
                cmd->base.speed = SPEED_UNKNOWN;
                cmd->base.duplex = DUPLEX_UNKNOWN;
        }
        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                supported);
        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                advertising);

        return 0;
}
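
/* Apply new link settings: on copper and multispeed-fiber ports this limits
 * the advertised speeds and restarts autonegotiation; all other ports only
 * accept a forced 10000baseT/Full request.
 */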
392 static int ixgbe_set_link_ksettings(struct net_device *netdev,
                                    const struct ethtool_link_ksettings *cmd)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        ixgbe_link_speed advertised, old;
        s32 err = 0;
        u32 supported, advertising;
401 ethtool_convert_link_mode_to_legacy_u32(&supported,
402 cmd->link_modes.supported);
403 ethtool_convert_link_mode_to_legacy_u32(&advertising,
404 cmd->link_modes.advertising);
406 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
407 (hw->phy.multispeed_fiber)) {
                /*
                 * this function does not support duplex forcing, but can
                 * limit the advertising of the adapter to the specified speed
                 */
                if (advertising & ~supported)
                        return -EINVAL;

                /* only allow one speed at a time if no autoneg */
                if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
                        if (advertising ==
                            (ADVERTISED_10000baseT_Full |
                             ADVERTISED_1000baseT_Full))
                                return -EINVAL;
                }

                old = hw->phy.autoneg_advertised;
                advertised = 0;
425 if (advertising & ADVERTISED_10000baseT_Full)
426 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
428 if (advertising & ADVERTISED_1000baseT_Full)
429 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
431 if (advertising & ADVERTISED_100baseT_Full)
432 advertised |= IXGBE_LINK_SPEED_100_FULL;
434 if (advertising & ADVERTISED_10baseT_Full)
435 advertised |= IXGBE_LINK_SPEED_10_FULL;
                if (old == advertised)
                        return err;
439 /* this sets the link speed and restarts auto-neg */
440 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
441 usleep_range(1000, 2000);
443 hw->mac.autotry_restart = true;
                err = hw->mac.ops.setup_link(hw, advertised, true);
                if (err) {
                        e_info(probe, "setup link failed with code %d\n", err);
                        hw->mac.ops.setup_link(hw, old, true);
                }
                clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
        } else {
451 /* in this case we currently only support 10Gb/FULL */
452 u32 speed = cmd->base.speed;
454 if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
455 (advertising != ADVERTISED_10000baseT_Full) ||
                    (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
                        return -EINVAL;
        }

        return err;
}
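
/* Report whether flow-control autonegotiation is enabled and which of Rx/Tx
 * pause is currently active, based on hw->fc.
 */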
463 static void ixgbe_get_pauseparam(struct net_device *netdev,
464 struct ethtool_pauseparam *pause)
466 struct ixgbe_adapter *adapter = netdev_priv(netdev);
467 struct ixgbe_hw *hw = &adapter->hw;
469 if (ixgbe_device_supports_autoneg_fc(hw) &&
470 !hw->fc.disable_fc_autoneg)
475 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
477 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
479 } else if (hw->fc.current_mode == ixgbe_fc_full) {
485 static int ixgbe_set_pauseparam(struct net_device *netdev,
486 struct ethtool_pauseparam *pause)
488 struct ixgbe_adapter *adapter = netdev_priv(netdev);
489 struct ixgbe_hw *hw = &adapter->hw;
490 struct ixgbe_fc_info fc = hw->fc;
        /* 82598 does not support link flow control with DCB enabled */
        if ((hw->mac.type == ixgbe_mac_82598EB) &&
            (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
                return -EINVAL;
497 /* some devices do not support autoneg of link flow control */
        if ((pause->autoneg == AUTONEG_ENABLE) &&
            !ixgbe_device_supports_autoneg_fc(hw))
                return -EINVAL;
502 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
504 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
505 fc.requested_mode = ixgbe_fc_full;
506 else if (pause->rx_pause && !pause->tx_pause)
507 fc.requested_mode = ixgbe_fc_rx_pause;
508 else if (!pause->rx_pause && pause->tx_pause)
509 fc.requested_mode = ixgbe_fc_tx_pause;
        else
                fc.requested_mode = ixgbe_fc_none;
513 /* if the thing changed then we'll update and use new autoneg */
        if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
                hw->fc = fc;
                if (netif_running(netdev))
                        ixgbe_reinit_locked(adapter);
                else
                        ixgbe_reset(adapter);
        }

        return 0;
}
525 static u32 ixgbe_get_msglevel(struct net_device *netdev)
527 struct ixgbe_adapter *adapter = netdev_priv(netdev);
528 return adapter->msg_enable;
531 static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
533 struct ixgbe_adapter *adapter = netdev_priv(netdev);
534 adapter->msg_enable = data;
537 static int ixgbe_get_regs_len(struct net_device *netdev)
539 #define IXGBE_REGS_LEN 1139
540 return IXGBE_REGS_LEN * sizeof(u32);
543 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
545 static void ixgbe_get_regs(struct net_device *netdev,
                           struct ethtool_regs *regs, void *p)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 *regs_buff = p;
        u8 i;

        memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
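
        /* regs->version packs the MAC type (bits 31:24), silicon revision
         * (bits 23:16) and PCI device ID (bits 15:0) so user space can tell
         * which register layout this dump follows.
         */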
        regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
                        hw->device_id;
558 /* General Registers */
559 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
560 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
561 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
562 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
563 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
564 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
565 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
566 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
569 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
570 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
571 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
572 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
573 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
574 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
575 regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
576 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
577 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
578 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
581 /* don't read EICR because it can clear interrupt causes, instead
582 * read EICS which is a shadow but doesn't clear EICR */
583 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
584 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
585 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
586 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
587 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
588 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
589 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
590 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
591 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
592 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
593 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
594 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
597 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
598 for (i = 0; i < 4; i++)
599 regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
600 for (i = 0; i < 8; i++) {
601 switch (hw->mac.type) {
602 case ixgbe_mac_82598EB:
603 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
604 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
606 case ixgbe_mac_82599EB:
609 case ixgbe_mac_X550EM_x:
610 case ixgbe_mac_x550em_a:
611 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
612 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
618 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
619 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
622 for (i = 0; i < 64; i++)
623 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
624 for (i = 0; i < 64; i++)
625 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
626 for (i = 0; i < 64; i++)
627 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
628 for (i = 0; i < 64; i++)
629 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
630 for (i = 0; i < 64; i++)
631 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
632 for (i = 0; i < 64; i++)
633 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
634 for (i = 0; i < 16; i++)
635 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
636 for (i = 0; i < 16; i++)
637 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
638 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
639 for (i = 0; i < 8; i++)
640 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
641 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
642 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
645 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
646 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
647 for (i = 0; i < 16; i++)
648 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
649 for (i = 0; i < 16; i++)
650 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
651 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
652 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
653 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
654 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
655 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
656 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
657 for (i = 0; i < 8; i++)
658 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
659 for (i = 0; i < 8; i++)
660 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
661 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
664 for (i = 0; i < 32; i++)
665 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
666 for (i = 0; i < 32; i++)
667 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
668 for (i = 0; i < 32; i++)
669 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
670 for (i = 0; i < 32; i++)
671 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
672 for (i = 0; i < 32; i++)
673 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
674 for (i = 0; i < 32; i++)
675 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
676 for (i = 0; i < 32; i++)
677 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
678 for (i = 0; i < 32; i++)
679 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
680 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
681 for (i = 0; i < 16; i++)
682 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
683 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
684 for (i = 0; i < 8; i++)
685 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
686 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
689 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
690 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
691 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
692 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
693 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
694 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
695 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
696 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
697 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
700 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */
701 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
703 switch (hw->mac.type) {
704 case ixgbe_mac_82598EB:
705 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
706 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
707 for (i = 0; i < 8; i++)
709 IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
710 for (i = 0; i < 8; i++)
712 IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
713 for (i = 0; i < 8; i++)
715 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
716 for (i = 0; i < 8; i++)
718 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
720 case ixgbe_mac_82599EB:
723 case ixgbe_mac_X550EM_x:
724 case ixgbe_mac_x550em_a:
725 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
726 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
727 for (i = 0; i < 8; i++)
729 IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
730 for (i = 0; i < 8; i++)
732 IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
733 for (i = 0; i < 8; i++)
735 IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
736 for (i = 0; i < 8; i++)
738 IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
744 for (i = 0; i < 8; i++)
746 IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
747 for (i = 0; i < 8; i++)
749 IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
752 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
753 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
754 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
755 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
756 for (i = 0; i < 8; i++)
757 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
758 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
759 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
760 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
761 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
762 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
763 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
764 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
765 for (i = 0; i < 8; i++)
766 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
767 for (i = 0; i < 8; i++)
768 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
769 for (i = 0; i < 8; i++)
770 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
771 for (i = 0; i < 8; i++)
772 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
773 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
774 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
775 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
776 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
777 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
778 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
779 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
780 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
781 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
782 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
783 regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
784 regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
785 regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
786 regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
787 for (i = 0; i < 8; i++)
788 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
789 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
790 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
791 regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
792 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
793 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
794 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
795 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
796 regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
797 regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
798 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
799 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
800 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
801 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
802 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
803 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
804 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
805 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
806 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
807 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
808 regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
809 for (i = 0; i < 16; i++)
810 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
811 for (i = 0; i < 16; i++)
812 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
813 for (i = 0; i < 16; i++)
814 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
815 for (i = 0; i < 16; i++)
816 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
819 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
820 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
821 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
822 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
823 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
824 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
825 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
826 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
827 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
828 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
829 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
830 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
831 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
832 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
833 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
834 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
835 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
836 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
837 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
838 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
839 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
840 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
841 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
842 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
843 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
844 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
845 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
846 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
847 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
848 regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
849 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
850 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
851 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
854 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
855 for (i = 0; i < 8; i++)
856 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
857 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
858 for (i = 0; i < 4; i++)
859 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
860 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
861 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
862 for (i = 0; i < 8; i++)
863 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
864 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
865 for (i = 0; i < 4; i++)
866 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
867 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
868 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
869 for (i = 0; i < 4; i++)
870 regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
871 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
872 for (i = 0; i < 4; i++)
873 regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
874 for (i = 0; i < 8; i++)
875 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
876 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
877 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
878 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
879 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
880 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
881 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
882 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
883 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
884 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
886 /* 82599 X540 specific registers */
887 regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
889 /* 82599 X540 specific DCB registers */
890 regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
891 regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
892 for (i = 0; i < 4; i++)
893 regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
894 regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
895 /* same as RTTQCNRM */
896 regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
897 /* same as RTTQCNRR */
899 /* X540 specific DCB registers */
900 regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
        regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
}

static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        return adapter->hw.eeprom.word_size * 2;
}
static int ixgbe_get_eeprom(struct net_device *netdev,
                            struct ethtool_eeprom *eeprom, u8 *bytes)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u16 *eeprom_buff;
        int first_word, last_word, eeprom_len;
        int ret_val = 0;
        u16 i;
        if (eeprom->len == 0)
                return -EINVAL;
923 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
925 first_word = eeprom->offset >> 1;
926 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
927 eeprom_len = last_word - first_word + 1;
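
        /* eeprom->offset and eeprom->len are byte based while the EEPROM is
         * read as 16-bit words: e.g. offset = 3, len = 4 gives first_word = 1,
         * last_word = 3, so three words (bytes 2-7) are read and the memcpy
         * below skips the leading odd byte.
         */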
        eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
        if (!eeprom_buff)
                return -ENOMEM;
933 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
936 /* Device's eeprom is always little-endian, word addressable */
937 for (i = 0; i < eeprom_len; i++)
938 le16_to_cpus(&eeprom_buff[i]);
        memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
        kfree(eeprom_buff);

        return ret_val;
}
946 static int ixgbe_set_eeprom(struct net_device *netdev,
947 struct ethtool_eeprom *eeprom, u8 *bytes)
949 struct ixgbe_adapter *adapter = netdev_priv(netdev);
950 struct ixgbe_hw *hw = &adapter->hw;
953 int max_len, first_word, last_word, ret_val = 0;
956 if (eeprom->len == 0)
959 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
962 max_len = hw->eeprom.word_size * 2;
964 first_word = eeprom->offset >> 1;
965 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
966 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
972 if (eeprom->offset & 1) {
974 * need read/modify/write of first changed EEPROM word
975 * only the second byte of the word is being modified
977 ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
983 if ((eeprom->offset + eeprom->len) & 1) {
985 * need read/modify/write of last changed EEPROM word
986 * only the first byte of the word is being modified
988 ret_val = hw->eeprom.ops.read(hw, last_word,
989 &eeprom_buff[last_word - first_word]);
994 /* Device's eeprom is always little-endian, word addressable */
995 for (i = 0; i < last_word - first_word + 1; i++)
996 le16_to_cpus(&eeprom_buff[i]);
998 memcpy(ptr, bytes, eeprom->len);
1000 for (i = 0; i < last_word - first_word + 1; i++)
1001 cpu_to_le16s(&eeprom_buff[i]);
1003 ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
1004 last_word - first_word + 1,
1007 /* Update the checksum */
1009 hw->eeprom.ops.update_checksum(hw);
1016 static void ixgbe_get_drvinfo(struct net_device *netdev,
1017 struct ethtool_drvinfo *drvinfo)
1019 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1021 strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
1022 strlcpy(drvinfo->version, ixgbe_driver_version,
1023 sizeof(drvinfo->version));
1025 strlcpy(drvinfo->fw_version, adapter->eeprom_id,
1026 sizeof(drvinfo->fw_version));
1028 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
1029 sizeof(drvinfo->bus_info));
1031 drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
1034 static void ixgbe_get_ringparam(struct net_device *netdev,
1035 struct ethtool_ringparam *ring)
1037 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1038 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
1039 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
1041 ring->rx_max_pending = IXGBE_MAX_RXD;
1042 ring->tx_max_pending = IXGBE_MAX_TXD;
1043 ring->rx_pending = rx_ring->count;
1044 ring->tx_pending = tx_ring->count;
1047 static int ixgbe_set_ringparam(struct net_device *netdev,
1048 struct ethtool_ringparam *ring)
1050 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1051 struct ixgbe_ring *temp_ring;
1053 u32 new_rx_count, new_tx_count;
        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;
1058 new_tx_count = clamp_t(u32, ring->tx_pending,
1059 IXGBE_MIN_TXD, IXGBE_MAX_TXD);
1060 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
1062 new_rx_count = clamp_t(u32, ring->rx_pending,
1063 IXGBE_MIN_RXD, IXGBE_MAX_RXD);
1064 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
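
        /* The requested descriptor counts are clamped to the supported range
         * and rounded up to the hardware's required multiple before being
         * compared against the current ring sizes.
         */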
1066 if ((new_tx_count == adapter->tx_ring_count) &&
            (new_rx_count == adapter->rx_ring_count)) {
                /* nothing to do */
                return 0;
        }
1072 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
1073 usleep_range(1000, 2000);
1075 if (!netif_running(adapter->netdev)) {
1076 for (i = 0; i < adapter->num_tx_queues; i++)
1077 adapter->tx_ring[i]->count = new_tx_count;
1078 for (i = 0; i < adapter->num_xdp_queues; i++)
1079 adapter->xdp_ring[i]->count = new_tx_count;
1080 for (i = 0; i < adapter->num_rx_queues; i++)
1081 adapter->rx_ring[i]->count = new_rx_count;
1082 adapter->tx_ring_count = new_tx_count;
1083 adapter->xdp_ring_count = new_tx_count;
1084 adapter->rx_ring_count = new_rx_count;
1088 /* allocate temporary buffer to store rings in */
1089 i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
1090 adapter->num_rx_queues);
1091 temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
1098 ixgbe_down(adapter);
1101 * Setup new Tx resources and free the old Tx resources in that order.
1102 * We can then assign the new resources to the rings via a memcpy.
1103 * The advantage to this approach is that we are guaranteed to still
1104 * have resources even in the case of an allocation failure.
1106 if (new_tx_count != adapter->tx_ring_count) {
1107 for (i = 0; i < adapter->num_tx_queues; i++) {
1108 memcpy(&temp_ring[i], adapter->tx_ring[i],
1109 sizeof(struct ixgbe_ring));
1111 temp_ring[i].count = new_tx_count;
1112 err = ixgbe_setup_tx_resources(&temp_ring[i]);
1116 ixgbe_free_tx_resources(&temp_ring[i]);
1122 for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
1123 memcpy(&temp_ring[i], adapter->xdp_ring[j],
1124 sizeof(struct ixgbe_ring));
1126 temp_ring[i].count = new_tx_count;
1127 err = ixgbe_setup_tx_resources(&temp_ring[i]);
1131 ixgbe_free_tx_resources(&temp_ring[i]);
1137 for (i = 0; i < adapter->num_tx_queues; i++) {
1138 ixgbe_free_tx_resources(adapter->tx_ring[i]);
1140 memcpy(adapter->tx_ring[i], &temp_ring[i],
1141 sizeof(struct ixgbe_ring));
1143 for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
1144 ixgbe_free_tx_resources(adapter->xdp_ring[j]);
1146 memcpy(adapter->xdp_ring[j], &temp_ring[i],
1147 sizeof(struct ixgbe_ring));
1150 adapter->tx_ring_count = new_tx_count;
1153 /* Repeat the process for the Rx rings if needed */
1154 if (new_rx_count != adapter->rx_ring_count) {
1155 for (i = 0; i < adapter->num_rx_queues; i++) {
1156 memcpy(&temp_ring[i], adapter->rx_ring[i],
1157 sizeof(struct ixgbe_ring));
1159 /* Clear copied XDP RX-queue info */
1160 memset(&temp_ring[i].xdp_rxq, 0,
1161 sizeof(temp_ring[i].xdp_rxq));
1163 temp_ring[i].count = new_rx_count;
1164 err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
1168 ixgbe_free_rx_resources(&temp_ring[i]);
1175 for (i = 0; i < adapter->num_rx_queues; i++) {
1176 ixgbe_free_rx_resources(adapter->rx_ring[i]);
1178 memcpy(adapter->rx_ring[i], &temp_ring[i],
1179 sizeof(struct ixgbe_ring));
1182 adapter->rx_ring_count = new_rx_count;
1189 clear_bit(__IXGBE_RESETTING, &adapter->state);
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
        switch (sset) {
        case ETH_SS_TEST:
                return IXGBE_TEST_LEN;
        case ETH_SS_STATS:
                return IXGBE_STATS_LEN;
        case ETH_SS_PRIV_FLAGS:
                return IXGBE_PRIV_FLAGS_STR_LEN;
        default:
                return -EOPNOTSUPP;
        }
}
1207 static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1208 struct ethtool_stats *stats, u64 *data)
1210 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1211 struct rtnl_link_stats64 temp;
1212 const struct rtnl_link_stats64 *net_stats;
1214 struct ixgbe_ring *ring;
1218 ixgbe_update_stats(adapter);
1219 net_stats = dev_get_stats(netdev, &temp);
1220 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1221 switch (ixgbe_gstrings_stats[i].type) {
1223 p = (char *) net_stats +
1224 ixgbe_gstrings_stats[i].stat_offset;
1227 p = (char *) adapter +
1228 ixgbe_gstrings_stats[i].stat_offset;
1235 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1236 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1238 for (j = 0; j < netdev->num_tx_queues; j++) {
1239 ring = adapter->tx_ring[j];
1248 start = u64_stats_fetch_begin_irq(&ring->syncp);
1249 data[i] = ring->stats.packets;
1250 data[i+1] = ring->stats.bytes;
1251 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1254 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1255 ring = adapter->rx_ring[j];
1264 start = u64_stats_fetch_begin_irq(&ring->syncp);
1265 data[i] = ring->stats.packets;
1266 data[i+1] = ring->stats.bytes;
1267 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1271 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1272 data[i++] = adapter->stats.pxontxc[j];
1273 data[i++] = adapter->stats.pxofftxc[j];
1275 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1276 data[i++] = adapter->stats.pxonrxc[j];
1277 data[i++] = adapter->stats.pxoffrxc[j];
1281 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1284 char *p = (char *)data;
1287 switch (stringset) {
1289 for (i = 0; i < IXGBE_TEST_LEN; i++) {
1290 memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
1291 data += ETH_GSTRING_LEN;
1295 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1296 memcpy(p, ixgbe_gstrings_stats[i].stat_string,
1298 p += ETH_GSTRING_LEN;
1300 for (i = 0; i < netdev->num_tx_queues; i++) {
1301 sprintf(p, "tx_queue_%u_packets", i);
1302 p += ETH_GSTRING_LEN;
1303 sprintf(p, "tx_queue_%u_bytes", i);
1304 p += ETH_GSTRING_LEN;
1306 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
1307 sprintf(p, "rx_queue_%u_packets", i);
1308 p += ETH_GSTRING_LEN;
1309 sprintf(p, "rx_queue_%u_bytes", i);
1310 p += ETH_GSTRING_LEN;
1312 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1313 sprintf(p, "tx_pb_%u_pxon", i);
1314 p += ETH_GSTRING_LEN;
1315 sprintf(p, "tx_pb_%u_pxoff", i);
1316 p += ETH_GSTRING_LEN;
1318 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1319 sprintf(p, "rx_pb_%u_pxon", i);
1320 p += ETH_GSTRING_LEN;
1321 sprintf(p, "rx_pb_%u_pxoff", i);
1322 p += ETH_GSTRING_LEN;
1324 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1326 case ETH_SS_PRIV_FLAGS:
1327 memcpy(data, ixgbe_priv_flags_strings,
1328 IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
1332 static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1334 struct ixgbe_hw *hw = &adapter->hw;
1338 if (ixgbe_removed(hw->hw_addr)) {
1344 hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1352 /* ethtool register test data */
struct ixgbe_reg_test {
        u16 reg;
        u8  array_len;
        u8  test_type;
        u32 mask;
        u32 write;
};

1361 /* In the hardware, registers are laid out either singly, in arrays
1362 * spaced 0x40 bytes apart, or in contiguous tables. We assume
1363 * most tests take place on arrays or single registers (handled
1364 * as a single-element array) and special-case the tables.
1365 * Table tests are always pattern tests.
1367 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */
1371 #define PATTERN_TEST 1
1372 #define SET_READ_TEST 2
1373 #define WRITE_NO_TEST 3
1374 #define TABLE32_TEST 4
1375 #define TABLE64_TEST_LO 5
1376 #define TABLE64_TEST_HI 6
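
/* Each table entry below is { reg, array_len, test_type, mask, write }: the
 * first register of the group, how many copies (spaced 0x40 apart, or table
 * entries) to exercise, which test type from the defines above to run, the
 * bits expected to be read back, and the value to write. An entry with
 * reg == 0 ends the table.
 */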
1378 /* default 82599 register test */
1379 static const struct ixgbe_reg_test reg_test_82599[] = {
1380 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1381 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1382 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1383 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1384 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1385 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1386 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1387 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1388 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1389 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1390 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1391 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1392 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1393 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1394 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1395 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1396 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1397 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
        { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { .reg = 0 }
};
1402 /* default 82598 register test */
1403 static const struct ixgbe_reg_test reg_test_82598[] = {
1404 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1405 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1406 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1407 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1408 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1409 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1410 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1411 /* Enable all four RX queues before testing. */
1412 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1413 /* RDH is read-only for 82598, only test RDT. */
1414 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1415 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1416 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1417 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1418 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1419 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1420 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1421 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1422 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1423 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1424 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1425 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
        { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { .reg = 0 }
};
1430 static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1431 u32 mask, u32 write)
1433 u32 pat, val, before;
1434 static const u32 test_pattern[] = {
1435 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1437 if (ixgbe_removed(adapter->hw.hw_addr)) {
1441 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1442 before = ixgbe_read_reg(&adapter->hw, reg);
1443 ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
1444 val = ixgbe_read_reg(&adapter->hw, reg);
1445 if (val != (test_pattern[pat] & write & mask)) {
1446 e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1447 reg, val, (test_pattern[pat] & write & mask));
1449 ixgbe_write_reg(&adapter->hw, reg, before);
1452 ixgbe_write_reg(&adapter->hw, reg, before);
1457 static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1458 u32 mask, u32 write)
1462 if (ixgbe_removed(adapter->hw.hw_addr)) {
1466 before = ixgbe_read_reg(&adapter->hw, reg);
1467 ixgbe_write_reg(&adapter->hw, reg, write & mask);
1468 val = ixgbe_read_reg(&adapter->hw, reg);
1469 if ((write & mask) != (val & mask)) {
1470 e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
1471 reg, (val & mask), (write & mask));
1473 ixgbe_write_reg(&adapter->hw, reg, before);
1476 ixgbe_write_reg(&adapter->hw, reg, before);
1480 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1482 const struct ixgbe_reg_test *test;
1483 u32 value, before, after;
1486 if (ixgbe_removed(adapter->hw.hw_addr)) {
1487 e_err(drv, "Adapter removed - register test blocked\n");
1491 switch (adapter->hw.mac.type) {
1492 case ixgbe_mac_82598EB:
1493 toggle = 0x7FFFF3FF;
1494 test = reg_test_82598;
1496 case ixgbe_mac_82599EB:
1497 case ixgbe_mac_X540:
1498 case ixgbe_mac_X550:
1499 case ixgbe_mac_X550EM_x:
1500 case ixgbe_mac_x550em_a:
1501 toggle = 0x7FFFF30F;
1502 test = reg_test_82599;
1510 * Because the status register is such a special case,
1511 * we handle it separately from the rest of the register
1512 * tests. Some bits are read-only, some toggle, and some
1513 * are writeable on newer MACs.
1515 before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
1516 value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
1517 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
1518 after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
1519 if (value != after) {
1520 e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
1525 /* restore previous status */
1526 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);
1529 * Perform the remainder of the register test, looping through
1530 * the test table until we either fail or reach the null entry.
1533 for (i = 0; i < test->array_len; i++) {
1536 switch (test->test_type) {
1538 b = reg_pattern_test(adapter, data,
1539 test->reg + (i * 0x40),
1544 b = reg_set_and_check(adapter, data,
1545 test->reg + (i * 0x40),
1550 ixgbe_write_reg(&adapter->hw,
1551 test->reg + (i * 0x40),
1555 b = reg_pattern_test(adapter, data,
1556 test->reg + (i * 4),
1560 case TABLE64_TEST_LO:
1561 b = reg_pattern_test(adapter, data,
1562 test->reg + (i * 8),
1566 case TABLE64_TEST_HI:
1567 b = reg_pattern_test(adapter, data,
1568 (test->reg + 4) + (i * 8),
1583 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1585 struct ixgbe_hw *hw = &adapter->hw;
1586 if (hw->eeprom.ops.validate_checksum(hw, NULL))
1593 static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
        struct net_device *netdev = (struct net_device *) data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);

        adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

        return IRQ_HANDLED;
}
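
/* Legacy/MSI interrupt self-test: for each cause bit the test masks the
 * source and fires it (expecting no interrupt), unmasks and fires it
 * (expecting one), and finally fires every other source with this one masked
 * (expecting none). MSI-X configurations are not exercised, and on a shared
 * legacy IRQ the mask-only checks are skipped.
 */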
1603 static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1605 struct net_device *netdev = adapter->netdev;
1606 u32 mask, i = 0, shared_int = true;
1607 u32 irq = adapter->pdev->irq;
1611 /* Hook up test interrupt handler just for this test */
1612 if (adapter->msix_entries) {
1613 /* NOTE: we don't test MSI-X interrupts here, yet */
1615 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1617 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
1622 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1623 netdev->name, netdev)) {
1625 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1626 netdev->name, netdev)) {
1630 e_info(hw, "testing %s interrupt\n", shared_int ?
1631 "shared" : "unshared");
1633 /* Disable all the interrupts */
1634 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1635 IXGBE_WRITE_FLUSH(&adapter->hw);
1636 usleep_range(10000, 20000);
1638 /* Test each interrupt */
1639 for (; i < 10; i++) {
1640 /* Interrupt to test */
1645 * Disable the interrupts to be reported in
1646 * the cause register and then force the same
1647 * interrupt and see if one gets posted. If
1648 * an interrupt was posted to the bus, the
1651 adapter->test_icr = 0;
1652 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1653 ~mask & 0x00007FFF);
1654 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1655 ~mask & 0x00007FFF);
1656 IXGBE_WRITE_FLUSH(&adapter->hw);
1657 usleep_range(10000, 20000);
1659 if (adapter->test_icr & mask) {
1666 * Enable the interrupt to be reported in the cause
1667 * register and then force the same interrupt and see
1668 * if one gets posted. If an interrupt was not posted
1669 * to the bus, the test failed.
1671 adapter->test_icr = 0;
1672 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1673 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1674 IXGBE_WRITE_FLUSH(&adapter->hw);
1675 usleep_range(10000, 20000);
1677 if (!(adapter->test_icr & mask)) {
1684 * Disable the other interrupts to be reported in
1685 * the cause register and then force the other
1686 * interrupts and see if any get posted. If
1687 * an interrupt was posted to the bus, the
1690 adapter->test_icr = 0;
1691 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1692 ~mask & 0x00007FFF);
1693 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1694 ~mask & 0x00007FFF);
1695 IXGBE_WRITE_FLUSH(&adapter->hw);
1696 usleep_range(10000, 20000);
1698 if (adapter->test_icr) {
1705 /* Disable all the interrupts */
1706 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1707 IXGBE_WRITE_FLUSH(&adapter->hw);
1708 usleep_range(10000, 20000);
1710 /* Unhook test interrupt handler */
1711 free_irq(irq, netdev);
1716 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1718 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1719 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1720 struct ixgbe_hw *hw = &adapter->hw;
1723 /* shut down the DMA engines now so they can be reinitialized later */
1726 hw->mac.ops.disable_rx(hw);
1727 ixgbe_disable_rx_queue(adapter, rx_ring);
1730 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1731 reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1732 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1734 switch (hw->mac.type) {
1735 case ixgbe_mac_82599EB:
1736 case ixgbe_mac_X540:
1737 case ixgbe_mac_X550:
1738 case ixgbe_mac_X550EM_x:
1739 case ixgbe_mac_x550em_a:
1740 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1741 reg_ctl &= ~IXGBE_DMATXCTL_TE;
1742 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1748 ixgbe_reset(adapter);
1750 ixgbe_free_tx_resources(&adapter->test_tx_ring);
1751 ixgbe_free_rx_resources(&adapter->test_rx_ring);
1754 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1756 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1757 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1758 struct ixgbe_hw *hw = &adapter->hw;
1763 /* Setup Tx descriptor ring and Tx buffers */
1764 tx_ring->count = IXGBE_DEFAULT_TXD;
1765 tx_ring->queue_index = 0;
1766 tx_ring->dev = &adapter->pdev->dev;
1767 tx_ring->netdev = adapter->netdev;
1768 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1770 err = ixgbe_setup_tx_resources(tx_ring);
1774 switch (adapter->hw.mac.type) {
1775 case ixgbe_mac_82599EB:
1776 case ixgbe_mac_X540:
1777 case ixgbe_mac_X550:
1778 case ixgbe_mac_X550EM_x:
1779 case ixgbe_mac_x550em_a:
1780 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1781 reg_data |= IXGBE_DMATXCTL_TE;
1782 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1788 ixgbe_configure_tx_ring(adapter, tx_ring);
1790 /* Setup Rx Descriptor ring and Rx buffers */
1791 rx_ring->count = IXGBE_DEFAULT_RXD;
1792 rx_ring->queue_index = 0;
1793 rx_ring->dev = &adapter->pdev->dev;
1794 rx_ring->netdev = adapter->netdev;
1795 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1797 err = ixgbe_setup_rx_resources(adapter, rx_ring);
1803 hw->mac.ops.disable_rx(hw);
1805 ixgbe_configure_rx_ring(adapter, rx_ring);
1807 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1808 rctl |= IXGBE_RXCTRL_DMBYPS;
1809 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1811 hw->mac.ops.enable_rx(hw);
1816 ixgbe_free_desc_rings(adapter);
1820 static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1822 struct ixgbe_hw *hw = &adapter->hw;
1826 /* Setup MAC loopback */
1827 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1828 reg_data |= IXGBE_HLREG0_LPBK;
1829 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
1831 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1832 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1833 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
1835 /* X540 and X550 needs to set the MACC.FLU bit to force link up */
1836 switch (adapter->hw.mac.type) {
1837 case ixgbe_mac_X540:
1838 case ixgbe_mac_X550:
1839 case ixgbe_mac_X550EM_x:
1840 case ixgbe_mac_x550em_a:
1841 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1842 reg_data |= IXGBE_MACC_FLU;
1843 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1846 if (hw->mac.orig_autoc) {
1847 reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
1848 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
1853 IXGBE_WRITE_FLUSH(hw);
1854 usleep_range(10000, 20000);
1856 /* Disable Atlas Tx lanes; re-enabled in reset path */
1857 if (hw->mac.type == ixgbe_mac_82598EB) {
1860 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1861 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1862 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1864 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1865 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1866 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1868 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1869 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1870 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1872 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1873 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1874 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1880 static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1884 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1885 reg_data &= ~IXGBE_HLREG0_LPBK;
1886 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1889 static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
                                      unsigned int frame_size)
{
        memset(skb->data, 0xFF, frame_size);
        frame_size >>= 1;
        memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
        memset(&skb->data[frame_size + 10], 0xBE, 1);
        memset(&skb->data[frame_size + 12], 0xAF, 1);
}
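
/* The loopback test frame is 0xFF filled, with a run of 0xAA starting at the
 * frame midpoint and single 0xBE/0xAF marker bytes at midpoint + 10 and + 12;
 * the receive check below only verifies the 0xFF fill and the two markers.
 */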
static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
				     unsigned int frame_size)
{
	unsigned char *data;
	bool match = true;

	frame_size >>= 1;
	data = kmap(rx_buffer->page) + rx_buffer->page_offset;
	if (data[3] != 0xFF ||
	    data[frame_size + 10] != 0xBE ||
	    data[frame_size + 12] != 0xAF)
		match = false;
	kunmap(rx_buffer->page);

	return match;
}
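/* Reclaim completed Tx buffers and walk the Rx ring, counting frames whose
 * contents match the loopback test pattern; buffers are re-armed and the
 * next-to-clean indices stored back in the rings.  Returns the good-frame
 * count.
 */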
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
				  struct ixgbe_ring *tx_ring,
				  unsigned int size)
{
	union ixgbe_adv_rx_desc *rx_desc;
	u16 rx_ntc, tx_ntc, count = 0;
1926 /* initialize next to clean and descriptor values */
1927 rx_ntc = rx_ring->next_to_clean;
1928 tx_ntc = tx_ring->next_to_clean;
1929 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1931 while (tx_ntc != tx_ring->next_to_use) {
1932 union ixgbe_adv_tx_desc *tx_desc;
1933 struct ixgbe_tx_buffer *tx_buffer;
1935 tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);
1937 /* if DD is not set transmit has not completed */
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;
1941 /* unmap buffer on Tx side */
1942 tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
1944 /* Free all the Tx ring sk_buffs */
1945 dev_kfree_skb_any(tx_buffer->skb);
1947 /* unmap skb header data */
1948 dma_unmap_single(tx_ring->dev,
1949 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);
		/* increment Tx next to clean counter */
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;
	}
1960 while (rx_desc->wb.upper.length) {
1961 struct ixgbe_rx_buffer *rx_buffer;
1963 /* check Rx buffer */
1964 rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
1966 /* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer->dma,
					ixgbe_rx_bufsz(rx_ring),
					DMA_FROM_DEVICE);
		/* verify contents of skb */
		if (ixgbe_check_lbtest_frame(rx_buffer, size))
			count++;
		else
			break;
1978 /* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer->dma,
					   ixgbe_rx_bufsz(rx_ring),
					   DMA_FROM_DEVICE);
		/* increment Rx next to clean counter */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;
		/* fetch next descriptor */
		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
	}
1993 netdev_tx_reset_queue(txring_txq(tx_ring));
1995 /* re-map buffers to ring, store next to clean values */
1996 ixgbe_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}
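/* Queue the test frame in bursts of 64 references and verify each burst is
 * received back through the MAC loopback.  A non-zero return value encodes
 * the failing stage (skb allocation, transmit, or receive/verify).
 */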
static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
2006 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
2007 int i, j, lc, good_cnt, ret_val = 0;
2008 unsigned int size = 1024;
2009 netdev_tx_t tx_ret_val;
2010 struct sk_buff *skb;
2011 u32 flags_orig = adapter->flags;
2013 /* DCB can modify the frames on Tx */
2014 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
2016 /* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;
2021 /* place data into test skb */
	ixgbe_create_lbtest_frame(skb, size);
	skb_put(skb, size);
	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */
	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;
2036 for (j = 0; j <= lc; j++) {
		/* reset count of good packets */
		good_cnt = 0;
		/* place 64 packets on the transmit queue */
		for (i = 0; i < 64; i++) {
			skb_get(skb);
			tx_ret_val = ixgbe_xmit_frame_ring(skb,
							   adapter,
							   tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}
		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	}
	/* free the original skb */
	kfree_skb(skb);
	adapter->flags = flags_orig;

	return ret_val;
}
static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;
}
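/* ethtool self-test entry point (ETHTOOL_TEST).  Offline runs the register,
 * EEPROM, interrupt, loopback and link tests (data[0]..data[4]); online mode
 * only checks link.  Invoked, for example, by "ethtool -t <iface> offline";
 * <iface> here is just a placeholder device name.
 */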
2089 static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2093 bool if_running = netif_running(netdev);
2095 if (ixgbe_removed(adapter->hw.hw_addr)) {
2096 e_err(hw, "Adapter removed - test blocked\n");
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
2105 set_bit(__IXGBE_TESTING, &adapter->state);
2106 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
2107 struct ixgbe_hw *hw = &adapter->hw;
2109 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;
			for (i = 0; i < adapter->num_vfs; i++) {
2112 if (adapter->vfinfo[i].clear_to_send) {
2113 netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}
2128 e_info(hw, "offline testing starting\n");
		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
2133 if (ixgbe_link_test(adapter, &data[4]))
2134 eth_test->flags |= ETH_TEST_FL_FAILED;
		if (if_running)
			/* indicate we're in test mode */
			ixgbe_close(netdev);
		else
			ixgbe_reset(adapter);
2142 e_info(hw, "register testing starting\n");
2143 if (ixgbe_reg_test(adapter, &data[0]))
2144 eth_test->flags |= ETH_TEST_FL_FAILED;
2146 ixgbe_reset(adapter);
2147 e_info(hw, "eeprom testing starting\n");
2148 if (ixgbe_eeprom_test(adapter, &data[1]))
2149 eth_test->flags |= ETH_TEST_FL_FAILED;
2151 ixgbe_reset(adapter);
2152 e_info(hw, "interrupt testing starting\n");
2153 if (ixgbe_intr_test(adapter, &data[2]))
2154 eth_test->flags |= ETH_TEST_FL_FAILED;
2156 /* If SRIOV or VMDq is enabled then skip MAC
2157 * loopback diagnostic. */
2158 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
2159 IXGBE_FLAG_VMDQ_ENABLED)) {
2160 e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
2165 ixgbe_reset(adapter);
2166 e_info(hw, "loopback testing starting\n");
2167 if (ixgbe_loopback_test(adapter, &data[3]))
2168 eth_test->flags |= ETH_TEST_FL_FAILED;
skip_loopback:
		ixgbe_reset(adapter);
2173 /* clear testing bit and return adapter to previous state */
		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			ixgbe_open(netdev);
		else if (hw->mac.ops.disable_tx_laser)
			hw->mac.ops.disable_tx_laser(hw);
	} else {
		e_info(hw, "online testing starting\n");
2183 if (ixgbe_link_test(adapter, &data[4]))
2184 eth_test->flags |= ETH_TEST_FL_FAILED;
		/* Offline tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}

skip_ol_tests:
	msleep_interruptible(4 * 1000);
}
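/* Helper for the Wake-on-LAN handlers: returns non-zero and clears
 * wol->supported when the device/subsystem ID combination has no WoL
 * support.
 */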
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 0;

	/* WOL not supported for all devices */
	if (!ixgbe_wol_supported(adapter, hw->device_id,
				 hw->subsystem_device_id)) {
		retval = 1;
		wol->supported = 0;
	}

	return retval;
}
2215 static void ixgbe_get_wol(struct net_device *netdev,
2216 struct ethtool_wolinfo *wol)
2218 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2220 wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;
2224 if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;
2228 if (adapter->wol & IXGBE_WUFC_EX)
2229 wol->wolopts |= WAKE_UCAST;
2230 if (adapter->wol & IXGBE_WUFC_MC)
2231 wol->wolopts |= WAKE_MCAST;
2232 if (adapter->wol & IXGBE_WUFC_BC)
2233 wol->wolopts |= WAKE_BCAST;
2234 if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}
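/* Translate the requested ethtool WoL options into IXGBE_WUFC_* filter bits
 * and arm PCI wakeup accordingly, e.g. "ethtool -s <iface> wol g" for
 * magic-packet wake (<iface> being whatever the port is named).
 */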
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;
2245 if (ixgbe_wol_exclusion(adapter, wol))
2246 return wol->wolopts ? -EOPNOTSUPP : 0;
	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
2251 adapter->wol |= IXGBE_WUFC_EX;
2252 if (wol->wolopts & WAKE_MCAST)
2253 adapter->wol |= IXGBE_WUFC_MC;
2254 if (wol->wolopts & WAKE_BCAST)
2255 adapter->wol |= IXGBE_WUFC_BC;
2256 if (wol->wolopts & WAKE_MAGIC)
2257 adapter->wol |= IXGBE_WUFC_MAG;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}
static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
		return -EOPNOTSUPP;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;
	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, hw->mac.led_link_act);
		break;
	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, hw->mac.led_link_act);
		break;
	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}
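/* Interrupt coalescing: an rx/tx_itr_setting of 1 means dynamic ITR, larger
 * values are the EITR interval stored as usecs << 2, hence the >> 2 when
 * reporting and << 2 when setting (see "ethtool -c/-C <iface> rx-usecs N").
 */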
2305 static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2310 /* only valid if in constant ITR mode */
2311 if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2316 /* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;
2320 /* only valid if in constant ITR mode */
2321 if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}
/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
2337 /* nothing to do if LRO or RSC are not enabled */
2338 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
	    !(netdev->features & NETIF_F_LRO))
		return false;
2342 /* check the feature flag value and enable RSC if necessary */
2343 if (adapter->rx_itr_setting == 1 ||
2344 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
2345 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2346 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
			return true;
		}
	/* if interrupt rate is too high then disable RSC */
2351 } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2352 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
		e_info(probe, "rx-usecs set too low, disabling RSC\n");
		return true;
	}

	return false;
}
2359 static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2363 struct ixgbe_q_vector *q_vector;
	int i;
	u16 tx_itr_param, rx_itr_param, tx_itr_prev;
2366 bool need_reset = false;
2368 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
2369 /* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EINVAL;
		tx_itr_prev = adapter->rx_itr_setting;
	} else {
		tx_itr_prev = adapter->tx_itr_setting;
	}
2377 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;
2381 if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
2386 if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;
2391 if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;
2396 if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;
2402 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2403 adapter->tx_itr_setting = adapter->rx_itr_setting;
2405 /* detect ITR changes that require update of TXDCTL.WTHRESH */
2406 if ((adapter->tx_itr_setting != 1) &&
2407 (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
		if ((tx_itr_prev == 1) ||
		    (tx_itr_prev >= IXGBE_100K_ITR))
			need_reset = true;
	} else {
		if ((tx_itr_prev != 1) &&
		    (tx_itr_prev < IXGBE_100K_ITR))
			need_reset = true;
	}
2417 /* check the old value and enable RSC if necessary */
2418 need_reset |= ixgbe_update_rsc(adapter);
2420 for (i = 0; i < adapter->num_q_vectors; i++) {
2421 q_vector = adapter->q_vector[i];
2422 if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}
	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
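/* Flow Director perfect filters are exposed through the ethtool ntuple
 * interface; a rule can be inspected or installed with, for example,
 * "ethtool -u <iface>" / "ethtool -U <iface> flow-type tcp4 ... action 1".
 */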
2442 static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2443 struct ethtool_rxnfc *cmd)
2445 union ixgbe_atr_input *mask = &adapter->fdir_mask;
2446 struct ethtool_rx_flow_spec *fsp =
2447 (struct ethtool_rx_flow_spec *)&cmd->fs;
2448 struct hlist_node *node2;
2449 struct ixgbe_fdir_filter *rule = NULL;
2451 /* report total rule count */
2452 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2454 hlist_for_each_entry_safe(rule, node2,
2455 &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;
2463 /* fill out the flow spec entry */
2465 /* set flow type field */
2466 switch (rule->filter.formatted.flow_type) {
2467 case IXGBE_ATR_FLOW_TYPE_TCPV4:
2468 fsp->flow_type = TCP_V4_FLOW;
2470 case IXGBE_ATR_FLOW_TYPE_UDPV4:
2471 fsp->flow_type = UDP_V4_FLOW;
2473 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2474 fsp->flow_type = SCTP_V4_FLOW;
2476 case IXGBE_ATR_FLOW_TYPE_IPV4:
2477 fsp->flow_type = IP_USER_FLOW;
2478 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
2479 fsp->h_u.usr_ip4_spec.proto = 0;
2480 fsp->m_u.usr_ip4_spec.proto = 0;
2486 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
2487 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
2488 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
2489 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
2490 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
2491 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
2492 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
2493 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
2494 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
2495 fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
2496 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
2497 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
2498 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
2499 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
2500 fsp->flow_type |= FLOW_EXT;
2503 if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}
2511 static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2512 struct ethtool_rxnfc *cmd,
2515 struct hlist_node *node2;
2516 struct ixgbe_fdir_filter *rule;
2519 /* report total rule count */
2520 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2522 hlist_for_each_entry_safe(rule, node2,
2523 &adapter->fdir_filter_list, fdir_node) {
2524 if (cnt == cmd->rule_cnt)
2526 rule_locs[cnt] = rule->sw_idx;
2530 cmd->rule_cnt = cnt;
2535 static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
2536 struct ethtool_rxnfc *cmd)
2540 /* Report default options for RSS on ixgbe */
2541 switch (cmd->flow_type) {
2543 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2546 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2547 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2550 case AH_ESP_V4_FLOW:
2554 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2557 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2560 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2561 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2564 case AH_ESP_V6_FLOW:
2568 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2577 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2580 struct ixgbe_adapter *adapter = netdev_priv(dev);
2581 int ret = -EOPNOTSUPP;
2584 case ETHTOOL_GRXRINGS:
2585 cmd->data = adapter->num_rx_queues;
2588 case ETHTOOL_GRXCLSRLCNT:
2589 cmd->rule_cnt = adapter->fdir_filter_count;
2592 case ETHTOOL_GRXCLSRULE:
2593 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
2595 case ETHTOOL_GRXCLSRLALL:
2596 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
2599 ret = ixgbe_get_rss_hash_opts(adapter, cmd);
2608 int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2609 struct ixgbe_fdir_filter *input,
2612 struct ixgbe_hw *hw = &adapter->hw;
2613 struct hlist_node *node2;
2614 struct ixgbe_fdir_filter *rule, *parent;
2620 hlist_for_each_entry_safe(rule, node2,
2621 &adapter->fdir_filter_list, fdir_node) {
2622 /* hash found, or no matching entry */
2623 if (rule->sw_idx >= sw_idx)
2628 /* if there is an old rule occupying our place remove it */
2629 if (rule && (rule->sw_idx == sw_idx)) {
2630 if (!input || (rule->filter.formatted.bkt_hash !=
2631 input->filter.formatted.bkt_hash)) {
2632 err = ixgbe_fdir_erase_perfect_filter_82599(hw,
2637 hlist_del(&rule->fdir_node);
2639 adapter->fdir_filter_count--;
2643 * If no input this was a delete, err should be 0 if a rule was
2644 * successfully found and removed from the list else -EINVAL
2649 /* initialize node and set software index */
2650 INIT_HLIST_NODE(&input->fdir_node);
2652 /* add filter to the list */
2654 hlist_add_behind(&input->fdir_node, &parent->fdir_node);
2656 hlist_add_head(&input->fdir_node,
2657 &adapter->fdir_filter_list);
2660 adapter->fdir_filter_count++;
2665 static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
2668 switch (fsp->flow_type & ~FLOW_EXT) {
2670 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2673 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2676 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2679 switch (fsp->h_u.usr_ip4_spec.proto) {
2681 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2684 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2687 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2690 if (!fsp->m_u.usr_ip4_spec.proto) {
2691 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2706 static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2707 struct ethtool_rxnfc *cmd)
2709 struct ethtool_rx_flow_spec *fsp =
2710 (struct ethtool_rx_flow_spec *)&cmd->fs;
2711 struct ixgbe_hw *hw = &adapter->hw;
2712 struct ixgbe_fdir_filter *input;
2713 union ixgbe_atr_input mask;
2717 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
2720 /* ring_cookie is a masked into a set of queues and ixgbe pools or
2721 * we use the drop index.
2723 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
2724 queue = IXGBE_FDIR_DROP_QUEUE;
2726 u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
2727 u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
2729 if (!vf && (ring >= adapter->num_rx_queues))
2732 ((vf > adapter->num_vfs) ||
2733 ring >= adapter->num_rx_queues_per_pool))
2736 /* Map the ring onto the absolute queue index */
2738 queue = adapter->rx_ring[ring]->reg_idx;
2741 adapter->num_rx_queues_per_pool) + ring;
2744 /* Don't allow indexes to exist outside of available space */
2745 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
2746 e_err(drv, "Location out of range\n");
2750 input = kzalloc(sizeof(*input), GFP_ATOMIC);
2754 memset(&mask, 0, sizeof(union ixgbe_atr_input));
2757 input->sw_idx = fsp->location;
2759 /* record flow type */
2760 if (!ixgbe_flowspec_to_flow_type(fsp,
2761 &input->filter.formatted.flow_type)) {
2762 e_err(drv, "Unrecognized flow type\n");
2766 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2767 IXGBE_ATR_L4TYPE_MASK;
2769 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
2770 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
2772 /* Copy input into formatted structures */
2773 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2774 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
2775 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2776 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
2777 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
2778 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
2779 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
2780 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
2782 if (fsp->flow_type & FLOW_EXT) {
2783 input->filter.formatted.vm_pool =
2784 (unsigned char)ntohl(fsp->h_ext.data[1]);
2785 mask.formatted.vm_pool =
2786 (unsigned char)ntohl(fsp->m_ext.data[1]);
2787 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
2788 mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
2789 input->filter.formatted.flex_bytes =
2790 fsp->h_ext.vlan_etype;
2791 mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
2794 /* determine if we need to drop or route the packet */
2795 if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2796 input->action = IXGBE_FDIR_DROP_QUEUE;
2798 input->action = fsp->ring_cookie;
2800 spin_lock(&adapter->fdir_perfect_lock);
2802 if (hlist_empty(&adapter->fdir_filter_list)) {
2803 /* save mask and program input mask into HW */
2804 memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
2805 err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
2807 e_err(drv, "Error writing mask\n");
2808 goto err_out_w_lock;
2810 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
2811 e_err(drv, "Only one mask supported per port\n");
2812 goto err_out_w_lock;
2815 /* apply mask and compute/store hash */
2816 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
2818 /* program filters to filter memory */
2819 err = ixgbe_fdir_write_perfect_filter_82599(hw,
2820 &input->filter, input->sw_idx, queue);
2822 goto err_out_w_lock;
2824 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
2826 spin_unlock(&adapter->fdir_perfect_lock);
2830 spin_unlock(&adapter->fdir_perfect_lock);
2836 static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2837 struct ethtool_rxnfc *cmd)
2839 struct ethtool_rx_flow_spec *fsp =
2840 (struct ethtool_rx_flow_spec *)&cmd->fs;
2843 spin_lock(&adapter->fdir_perfect_lock);
2844 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
2845 spin_unlock(&adapter->fdir_perfect_lock);
2850 #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2851 IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
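/* Adjust which header fields feed the RSS hash.  Only the UDP L4-port bits
 * are actually configurable (MRQC/PFVFMRQC); e.g.
 * "ethtool -N <iface> rx-flow-hash udp4 sdfn" enables 4-tuple hashing for
 * IPv4 UDP.
 */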
2852 static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
2853 struct ethtool_rxnfc *nfc)
2855 u32 flags2 = adapter->flags2;
2858 * RSS does not support anything other than hashing
2859 * to queues on src and dst IPs and ports
2861 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2862 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2865 switch (nfc->flow_type) {
2868 if (!(nfc->data & RXH_IP_SRC) ||
2869 !(nfc->data & RXH_IP_DST) ||
2870 !(nfc->data & RXH_L4_B_0_1) ||
2871 !(nfc->data & RXH_L4_B_2_3))
2875 if (!(nfc->data & RXH_IP_SRC) ||
2876 !(nfc->data & RXH_IP_DST))
2878 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2880 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2882 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2883 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2890 if (!(nfc->data & RXH_IP_SRC) ||
2891 !(nfc->data & RXH_IP_DST))
2893 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2895 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2897 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2898 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2904 case AH_ESP_V4_FLOW:
2908 case AH_ESP_V6_FLOW:
2912 if (!(nfc->data & RXH_IP_SRC) ||
2913 !(nfc->data & RXH_IP_DST) ||
2914 (nfc->data & RXH_L4_B_0_1) ||
2915 (nfc->data & RXH_L4_B_2_3))
2922 /* if we changed something we need to update flags */
2923 if (flags2 != adapter->flags2) {
2924 struct ixgbe_hw *hw = &adapter->hw;
2926 unsigned int pf_pool = adapter->num_vfs;
2928 if ((hw->mac.type >= ixgbe_mac_X550) &&
2929 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2930 mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
2932 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2934 if ((flags2 & UDP_RSS_FLAGS) &&
2935 !(adapter->flags2 & UDP_RSS_FLAGS))
2936 e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
2938 adapter->flags2 = flags2;
2940 /* Perform hash on these packet types */
2941 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2942 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2943 | IXGBE_MRQC_RSS_FIELD_IPV6
2944 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2946 mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2947 IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
2949 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2950 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2952 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2953 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2955 if ((hw->mac.type >= ixgbe_mac_X550) &&
2956 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2957 IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
2959 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2965 static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2967 struct ixgbe_adapter *adapter = netdev_priv(dev);
2968 int ret = -EOPNOTSUPP;
2971 case ETHTOOL_SRXCLSRLINS:
2972 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
2974 case ETHTOOL_SRXCLSRLDEL:
2975 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
2978 ret = ixgbe_set_rss_hash_opt(adapter, cmd);
static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type < ixgbe_mac_X550)
		return 16;
	else
		return 64;
}
static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
	return IXGBE_RSS_KEY_SIZE;
}
static u32 ixgbe_rss_indir_size(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return ixgbe_rss_indir_tbl_entries(adapter);
}
static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
{
	int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
3010 u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
3012 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3013 rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
3015 for (i = 0; i < reta_size; i++)
		indir[i] = adapter->rss_indir_tbl[i] & rss_m;
}
3019 static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (indir)
		ixgbe_get_reta(adapter, indir);

	if (key)
		memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));

	return 0;
}
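/* Update the RSS indirection table and/or hash key supplied by userspace,
 * e.g. "ethtool -X <iface> equal 4" to spread flows over the first four
 * queues.
 */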
3036 static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
3037 const u8 *key, const u8 hfunc)
3039 struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;
	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);

	if (hfunc)
		return -EINVAL;
	/* Fill out the redirection table */
	if (indir) {
3048 int max_queues = min_t(int, adapter->num_rx_queues,
3049 ixgbe_rss_indir_tbl_max(adapter));
3051 /*Allow at least 2 queues w/ SR-IOV.*/
		if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
		    (max_queues < 2))
			max_queues = 2;
3056 /* Verify user input. */
3057 for (i = 0; i < reta_entries; i++)
			if (indir[i] >= max_queues)
				return -EINVAL;
3061 for (i = 0; i < reta_entries; i++)
3062 adapter->rss_indir_tbl[i] = indir[i];
		ixgbe_store_reta(adapter);
	}
	/* Fill out the rss hash key */
	if (key) {
3069 memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
		ixgbe_store_key(adapter);
	}

	return 0;
}
3076 static int ixgbe_get_ts_info(struct net_device *dev,
			     struct ethtool_ts_info *info)
{
3079 struct ixgbe_adapter *adapter = netdev_priv(dev);
3081 /* we always support timestamping disabled */
3082 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
3084 switch (adapter->hw.mac.type) {
3085 case ixgbe_mac_X550:
3086 case ixgbe_mac_X550EM_x:
3087 case ixgbe_mac_x550em_a:
		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
		break;
3090 case ixgbe_mac_X540:
3091 case ixgbe_mac_82599EB:
		info->rx_filters |=
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
3094 BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
		break;
	default:
		return ethtool_op_get_ts_info(dev, info);
	}
3101 info->so_timestamping =
3102 SOF_TIMESTAMPING_TX_SOFTWARE |
3103 SOF_TIMESTAMPING_RX_SOFTWARE |
3104 SOF_TIMESTAMPING_SOFTWARE |
3105 SOF_TIMESTAMPING_TX_HARDWARE |
3106 SOF_TIMESTAMPING_RX_HARDWARE |
3107 SOF_TIMESTAMPING_RAW_HARDWARE;
3109 if (adapter->ptp_clock)
3110 info->phc_index = ptp_clock_index(adapter->ptp_clock);
	else
		info->phc_index = -1;
	info->tx_types =
		BIT(HWTSTAMP_TX_OFF) |
		BIT(HWTSTAMP_TX_ON);

	return 0;
}
3121 static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
3123 unsigned int max_combined;
3124 u8 tcs = adapter->hw_tcs;
3126 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* We only support one q_vector without MSI-X */
		max_combined = 1;
3129 } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3130 /* Limit value based on the queue mask */
3131 max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
3132 } else if (tcs > 1) {
3133 /* For DCB report channels per traffic class */
3134 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			/* 8 TC w/ 4 queues per TC */
			max_combined = 4;
		} else if (tcs > 4) {
			/* 8 TC w/ 8 queues per TC */
			max_combined = 8;
		} else {
			/* 4 TC w/ 16 queues per TC */
			max_combined = 16;
		}
3144 } else if (adapter->atr_sample_rate) {
3145 /* support up to 64 queues with ATR */
3146 max_combined = IXGBE_MAX_FDIR_INDICES;
3148 /* support up to 16 queues with RSS */
3149 max_combined = ixgbe_max_rss_indices(adapter);
	return max_combined;
}
3155 static void ixgbe_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
3158 struct ixgbe_adapter *adapter = netdev_priv(dev);
3160 /* report maximum channels */
3161 ch->max_combined = ixgbe_max_channels(adapter);
3163 /* report info for other vector */
3164 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3165 ch->max_other = NON_Q_VECTORS;
		ch->other_count = NON_Q_VECTORS;
	}
3169 /* record RSS queues */
3170 ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
3172 /* nothing else to report if RSS is disabled */
	if (ch->combined_count == 1)
		return;
3176 /* we do not support ATR queueing if SR-IOV is enabled */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		return;
3180 /* same thing goes for being DCB enabled */
	if (adapter->hw_tcs > 1)
		return;
3184 /* if ATR is disabled we can exit */
	if (!adapter->atr_sample_rate)
		return;
3188 /* report flow director queues as maximum channels */
	ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
}
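/* Set the number of combined queue pairs, capped by ixgbe_max_channels();
 * the change is applied by re-running setup_tc, e.g.
 * "ethtool -L <iface> combined 8".
 */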
3192 static int ixgbe_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
3195 struct ixgbe_adapter *adapter = netdev_priv(dev);
3196 unsigned int count = ch->combined_count;
3197 u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
3199 /* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;
3203 /* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;
3207 /* verify the number of channels does not exceed hardware limits */
	if (count > ixgbe_max_channels(adapter))
		return -EINVAL;
3211 /* update feature limits from largest to smallest supported values */
3212 adapter->ring_feature[RING_F_FDIR].limit = count;
3215 if (count > max_rss_indices)
3216 count = max_rss_indices;
3217 adapter->ring_feature[RING_F_RSS].limit = count;
#ifdef IXGBE_FCOE
	/* cap FCoE limit at 8 */
3221 if (count > IXGBE_FCRETA_SIZE)
3222 count = IXGBE_FCRETA_SIZE;
	adapter->ring_feature[RING_F_FCOE].limit = count;

#endif
	/* use setup TC to update any traffic class queue mapping */
	return ixgbe_setup_tc(dev, adapter->hw_tcs);
}
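/* Report the pluggable module (SFP+) EEPROM layout: SFF-8472 when the module
 * implements it, otherwise plain SFF-8079.  Dumped by "ethtool -m <iface>".
 */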
3230 static int ixgbe_get_module_info(struct net_device *dev,
3231 struct ethtool_modinfo *modinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
3234 struct ixgbe_hw *hw = &adapter->hw;
3236 u8 sff8472_rev, addr_mode;
3237 bool page_swap = false;
3239 if (hw->phy.type == ixgbe_phy_fw)
3242 /* Check whether we support SFF-8472 or not */
3243 status = hw->phy.ops.read_i2c_eeprom(hw,
3244 IXGBE_SFF_SFF_8472_COMP,
3249 /* addressing mode is not supported */
3250 status = hw->phy.ops.read_i2c_eeprom(hw,
3251 IXGBE_SFF_SFF_8472_SWAP,
3256 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
3257 e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
3261 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
3262 /* We have a SFP, but it does not support SFF-8472 */
3263 modinfo->type = ETH_MODULE_SFF_8079;
3264 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
3266 /* We have a SFP which supports a revision of SFF-8472. */
3267 modinfo->type = ETH_MODULE_SFF_8472;
3268 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
3274 static int ixgbe_get_module_eeprom(struct net_device *dev,
				   struct ethtool_eeprom *ee,
				   u8 *data)
{
3278 struct ixgbe_adapter *adapter = netdev_priv(dev);
3279 struct ixgbe_hw *hw = &adapter->hw;
3280 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
3287 if (hw->phy.type == ixgbe_phy_fw)
3290 for (i = ee->offset; i < ee->offset + ee->len; i++) {
3291 /* I2C reads can take long time */
		if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			return -EBUSY;
3295 if (i < ETH_MODULE_SFF_8079_LEN)
3296 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
		else
			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
		if (status)
			return -EIO;

		data[i - ee->offset] = databyte;
	}

	return 0;
}
3309 static const struct {
3310 ixgbe_link_speed mac_speed;
	u32 supported;
} ixgbe_ls_map[] = {
3313 { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
3314 { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
3315 { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
3316 { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
	{ IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
};
3320 static const struct {
	u32 lp_advertised;
	u32 mac_speed;
} ixgbe_lp_map[] = {
3324 { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
3325 { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
3326 { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
3327 { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
3328 { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
	{ FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
};
static int
ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
{
3335 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
3336 struct ixgbe_hw *hw = &adapter->hw;
3340 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
3344 edata->lp_advertised = 0;
3345 for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
3346 if (info[0] & ixgbe_lp_map[i].lp_advertised)
3347 edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
3350 edata->supported = 0;
3351 for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3352 if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
3353 edata->supported |= ixgbe_ls_map[i].supported;
3356 edata->advertised = 0;
3357 for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3358 if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
3359 edata->advertised |= ixgbe_ls_map[i].supported;
3362 edata->eee_enabled = !!edata->advertised;
3363 edata->tx_lpi_enabled = edata->eee_enabled;
3364 if (edata->advertised & edata->lp_advertised)
3365 edata->eee_active = true;
3370 static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3373 struct ixgbe_hw *hw = &adapter->hw;
	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;
3378 if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
3379 return ixgbe_get_eee_fw(adapter, edata);
3384 static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3387 struct ixgbe_hw *hw = &adapter->hw;
3388 struct ethtool_eee eee_data;
	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;
3394 memset(&eee_data, 0, sizeof(struct ethtool_eee));
3396 ret_val = ixgbe_get_eee(netdev, &eee_data);
3400 if (eee_data.eee_enabled && !edata->eee_enabled) {
3401 if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
3402 e_err(drv, "Setting EEE tx-lpi is not supported\n");
3406 if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
3408 "Setting EEE Tx LPI timer is not supported\n");
3412 if (eee_data.advertised != edata->advertised) {
3414 "Setting EEE advertised speeds is not supported\n");
3419 if (eee_data.eee_enabled != edata->eee_enabled) {
3420 if (edata->eee_enabled) {
3421 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
3422 hw->phy.eee_speeds_advertised =
3423 hw->phy.eee_speeds_supported;
3425 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
3426 hw->phy.eee_speeds_advertised = 0;
3430 if (netif_running(netdev))
3431 ixgbe_reinit_locked(adapter);
3433 ixgbe_reset(adapter);
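/* Driver private flags: currently only "legacy-rx", which selects the older
 * Rx buffer scheme, e.g. "ethtool --set-priv-flags <iface> legacy-rx on".
 */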
3439 static u32 ixgbe_get_priv_flags(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 priv_flags = 0;

	if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
		priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;

	return priv_flags;
}
3450 static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3453 unsigned int flags2 = adapter->flags2;
3455 flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
3456 if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
3457 flags2 |= IXGBE_FLAG2_RX_LEGACY;
3459 if (flags2 != adapter->flags2) {
3460 adapter->flags2 = flags2;
3462 /* reset interface to repopulate queues */
3463 if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
	}

	return 0;
}
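/* Wire the handlers above into the ethtool core; ixgbe_set_ethtool_ops()
 * attaches this table to the netdev at probe time.
 */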
3470 static const struct ethtool_ops ixgbe_ethtool_ops = {
3471 .get_drvinfo = ixgbe_get_drvinfo,
3472 .get_regs_len = ixgbe_get_regs_len,
3473 .get_regs = ixgbe_get_regs,
3474 .get_wol = ixgbe_get_wol,
3475 .set_wol = ixgbe_set_wol,
3476 .nway_reset = ixgbe_nway_reset,
3477 .get_link = ethtool_op_get_link,
3478 .get_eeprom_len = ixgbe_get_eeprom_len,
3479 .get_eeprom = ixgbe_get_eeprom,
3480 .set_eeprom = ixgbe_set_eeprom,
3481 .get_ringparam = ixgbe_get_ringparam,
3482 .set_ringparam = ixgbe_set_ringparam,
3483 .get_pauseparam = ixgbe_get_pauseparam,
3484 .set_pauseparam = ixgbe_set_pauseparam,
3485 .get_msglevel = ixgbe_get_msglevel,
3486 .set_msglevel = ixgbe_set_msglevel,
3487 .self_test = ixgbe_diag_test,
3488 .get_strings = ixgbe_get_strings,
3489 .set_phys_id = ixgbe_set_phys_id,
3490 .get_sset_count = ixgbe_get_sset_count,
3491 .get_ethtool_stats = ixgbe_get_ethtool_stats,
3492 .get_coalesce = ixgbe_get_coalesce,
3493 .set_coalesce = ixgbe_set_coalesce,
3494 .get_rxnfc = ixgbe_get_rxnfc,
3495 .set_rxnfc = ixgbe_set_rxnfc,
3496 .get_rxfh_indir_size = ixgbe_rss_indir_size,
3497 .get_rxfh_key_size = ixgbe_get_rxfh_key_size,
3498 .get_rxfh = ixgbe_get_rxfh,
3499 .set_rxfh = ixgbe_set_rxfh,
3500 .get_eee = ixgbe_get_eee,
3501 .set_eee = ixgbe_set_eee,
3502 .get_channels = ixgbe_get_channels,
3503 .set_channels = ixgbe_set_channels,
3504 .get_priv_flags = ixgbe_get_priv_flags,
3505 .set_priv_flags = ixgbe_set_priv_flags,
3506 .get_ts_info = ixgbe_get_ts_info,
3507 .get_module_info = ixgbe_get_module_info,
3508 .get_module_eeprom = ixgbe_get_module_eeprom,
3509 .get_link_ksettings = ixgbe_get_link_ksettings,
	.set_link_ksettings = ixgbe_set_link_ksettings,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbe_ethtool_ops;
}