1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
4 #include <linux/vmalloc.h>
/* NOTE(review): this extract is missing interleaved original lines (the
 * struct fm10k_stats header, closing braces of macros and tables); the
 * comments below describe only what is visible here.
 */
/* display name of a statistic; a member of struct fm10k_stats, whose
 * definition starts above this visible chunk
 */
9 char stat_string[ETH_GSTRING_LEN];
/* describe a struct net_device_stats member as an ethtool statistic
 * (name, byte size, and byte offset within the stats struct)
 */
14 #define FM10K_NETDEV_STAT(_net_stat) { \
15 .stat_string = #_net_stat, \
16 .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
17 .stat_offset = offsetof(struct net_device_stats, _net_stat) \
/* statistics read from netdev->stats */
20 static const struct fm10k_stats fm10k_gstrings_net_stats[] = {
21 FM10K_NETDEV_STAT(tx_packets),
22 FM10K_NETDEV_STAT(tx_bytes),
23 FM10K_NETDEV_STAT(tx_errors),
24 FM10K_NETDEV_STAT(rx_packets),
25 FM10K_NETDEV_STAT(rx_bytes),
26 FM10K_NETDEV_STAT(rx_errors),
27 FM10K_NETDEV_STAT(rx_dropped),
29 /* detailed Rx errors */
30 FM10K_NETDEV_STAT(rx_length_errors),
31 FM10K_NETDEV_STAT(rx_crc_errors),
32 FM10K_NETDEV_STAT(rx_fifo_errors),
35 #define FM10K_NETDEV_STATS_LEN ARRAY_SIZE(fm10k_gstrings_net_stats)
/* describe a struct fm10k_intfc member as an ethtool statistic */
37 #define FM10K_STAT(_name, _stat) { \
38 .stat_string = _name, \
39 .sizeof_stat = FIELD_SIZEOF(struct fm10k_intfc, _stat), \
40 .stat_offset = offsetof(struct fm10k_intfc, _stat) \
/* driver-global statistics kept in the interface structure */
43 static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
44 FM10K_STAT("tx_restart_queue", restart_queue),
45 FM10K_STAT("tx_busy", tx_busy),
46 FM10K_STAT("tx_csum_errors", tx_csum_errors),
47 FM10K_STAT("rx_alloc_failed", alloc_failed),
48 FM10K_STAT("rx_csum_errors", rx_csum_errors),
50 FM10K_STAT("tx_packets_nic", tx_packets_nic),
51 FM10K_STAT("tx_bytes_nic", tx_bytes_nic),
52 FM10K_STAT("rx_packets_nic", rx_packets_nic),
53 FM10K_STAT("rx_bytes_nic", rx_bytes_nic),
54 FM10K_STAT("rx_drops_nic", rx_drops_nic),
55 FM10K_STAT("rx_overrun_pf", rx_overrun_pf),
56 FM10K_STAT("rx_overrun_vf", rx_overrun_vf),
58 FM10K_STAT("swapi_status", hw.swapi.status),
59 FM10K_STAT("mac_rules_used", hw.swapi.mac.used),
60 FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),
62 FM10K_STAT("reset_while_pending", hw.mac.reset_while_pending),
64 FM10K_STAT("tx_hang_count", tx_timeout_count),
/* PF-only statistics (skipped for VF MACs in the string/data builders) */
67 static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
68 FM10K_STAT("timeout", stats.timeout.count),
69 FM10K_STAT("ur", stats.ur.count),
70 FM10K_STAT("ca", stats.ca.count),
71 FM10K_STAT("um", stats.um.count),
72 FM10K_STAT("xec", stats.xec.count),
73 FM10K_STAT("vlan_drop", stats.vlan_drop.count),
74 FM10K_STAT("loopback_drop", stats.loopback_drop.count),
75 FM10K_STAT("nodesc_drop", stats.nodesc_drop.count),
/* describe a struct fm10k_mbx_info member as an ethtool statistic */
78 #define FM10K_MBX_STAT(_name, _stat) { \
79 .stat_string = _name, \
80 .sizeof_stat = FIELD_SIZEOF(struct fm10k_mbx_info, _stat), \
81 .stat_offset = offsetof(struct fm10k_mbx_info, _stat) \
/* mailbox statistics, read from hw.mbx */
84 static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
85 FM10K_MBX_STAT("mbx_tx_busy", tx_busy),
86 FM10K_MBX_STAT("mbx_tx_dropped", tx_dropped),
87 FM10K_MBX_STAT("mbx_tx_messages", tx_messages),
88 FM10K_MBX_STAT("mbx_tx_dwords", tx_dwords),
89 FM10K_MBX_STAT("mbx_tx_mbmem_pulled", tx_mbmem_pulled),
90 FM10K_MBX_STAT("mbx_rx_messages", rx_messages),
91 FM10K_MBX_STAT("mbx_rx_dwords", rx_dwords),
92 FM10K_MBX_STAT("mbx_rx_parse_err", rx_parse_err),
93 FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed),
/* describe a struct fm10k_ring member as a per-queue ethtool statistic */
96 #define FM10K_QUEUE_STAT(_name, _stat) { \
97 .stat_string = _name, \
98 .sizeof_stat = FIELD_SIZEOF(struct fm10k_ring, _stat), \
99 .stat_offset = offsetof(struct fm10k_ring, _stat) \
100 /* per-queue statistics; prefixed with tx_queue_<n>_/rx_queue_<n>_ below */
102 static const struct fm10k_stats fm10k_gstrings_queue_stats[] = {
103 FM10K_QUEUE_STAT("packets", stats.packets),
104 FM10K_QUEUE_STAT("bytes", stats.bytes),
107 #define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
108 #define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats)
109 #define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats)
110 #define FM10K_QUEUE_STATS_LEN ARRAY_SIZE(fm10k_gstrings_queue_stats)
/* total of the queue-independent stats (continuation line(s) not visible) */
112 #define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
113 FM10K_NETDEV_STATS_LEN + \
/* names of the ethtool self-tests (ETH_SS_TEST) */
116 static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = {
117 "Mailbox test (on/offline)"
120 #define FM10K_TEST_LEN (sizeof(fm10k_gstrings_test) / ETH_GSTRING_LEN)
/* indices into the self-test result array */
122 enum fm10k_self_test_types {
124 FM10K_TEST_MAX = FM10K_TEST_LEN
/* names of the private flags (ETH_SS_PRIV_FLAGS); entries not visible here */
131 static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
134 static void fm10k_add_stat_strings(u8 **p, const char *prefix,
135 const struct fm10k_stats stats[],
136 const unsigned int size)
140 for (i = 0; i < size; i++) {
141 snprintf(*p, ETH_GSTRING_LEN, "%s%s",
142 prefix, stats[i].stat_string);
143 *p += ETH_GSTRING_LEN;
/* fm10k_get_stat_strings - build the complete ETH_SS_STATS name table.
 * NOTE(review): this extract is missing lines (opening brace, the PF stats
 * length argument, loop/close braces); comments describe visible code only.
 */
147 static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
149 struct fm10k_intfc *interface = netdev_priv(dev);
/* netdev stats first, then driver-global stats, then mailbox stats --
 * order must match fm10k_get_ethtool_stats() below
 */
152 fm10k_add_stat_strings(&data, "", fm10k_gstrings_net_stats,
153 FM10K_NETDEV_STATS_LEN);
155 fm10k_add_stat_strings(&data, "", fm10k_gstrings_global_stats,
156 FM10K_GLOBAL_STATS_LEN);
158 fm10k_add_stat_strings(&data, "", fm10k_gstrings_mbx_stats,
159 FM10K_MBX_STATS_LEN);
/* PF-only statistics are omitted on VF MACs */
161 if (interface->hw.mac.type != fm10k_mac_vf)
162 fm10k_add_stat_strings(&data, "", fm10k_gstrings_pf_stats,
/* per-queue stats carry a "tx_queue_<n>_"/"rx_queue_<n>_" prefix */
165 for (i = 0; i < interface->hw.mac.max_queues; i++) {
166 char prefix[ETH_GSTRING_LEN];
168 snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i);
169 fm10k_add_stat_strings(&data, prefix,
170 fm10k_gstrings_queue_stats,
171 FM10K_QUEUE_STATS_LEN);
173 snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i);
174 fm10k_add_stat_strings(&data, prefix,
175 fm10k_gstrings_queue_stats,
176 FM10K_QUEUE_STATS_LEN);
/* fm10k_get_strings - ethtool .get_strings: copy the name table for the
 * requested string set. NOTE(review): the switch statement and some case
 * labels are not visible in this extract.
 */
180 static void fm10k_get_strings(struct net_device *dev,
181 u32 stringset, u8 *data)
/* self-test names (presumably the ETH_SS_TEST case -- label not visible) */
185 memcpy(data, fm10k_gstrings_test,
186 FM10K_TEST_LEN * ETH_GSTRING_LEN);
/* statistics names (presumably the ETH_SS_STATS case) */
189 fm10k_get_stat_strings(dev, data);
191 case ETH_SS_PRIV_FLAGS:
192 memcpy(data, fm10k_prv_flags,
193 FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN);
/* fm10k_get_sset_count - ethtool .get_sset_count: number of entries in a
 * given string set; must agree with fm10k_get_strings() above.
 */
198 static int fm10k_get_sset_count(struct net_device *dev, int sset)
200 struct fm10k_intfc *interface = netdev_priv(dev);
201 struct fm10k_hw *hw = &interface->hw;
202 int stats_len = FM10K_STATIC_STATS_LEN;
206 return FM10K_TEST_LEN;
/* two rings (Tx + Rx) worth of queue stats per queue */
208 stats_len += hw->mac.max_queues * 2 * FM10K_QUEUE_STATS_LEN;
210 if (hw->mac.type != fm10k_mac_vf)
211 stats_len += FM10K_PF_STATS_LEN;
214 case ETH_SS_PRIV_FLAGS:
215 return FM10K_PRV_FLAG_LEN;
/* fm10k_add_ethtool_stats - copy stat values from @pointer into *data,
 * widening each field to u64 by its recorded sizeof_stat.
 * NOTE(review): the body of the "clear when pointer is NULL" loop and the
 * switch case labels/default are not visible in this extract.
 */
221 static void fm10k_add_ethtool_stats(u64 **data, void *pointer,
222 const struct fm10k_stats stats[],
223 const unsigned int size)
229 /* memory is not zero allocated so we have to clear it */
230 for (i = 0; i < size; i++)
235 for (i = 0; i < size; i++) {
236 p = (char *)pointer + stats[i].stat_offset;
/* widen u64/u32/u16/u8 source fields to the u64 output slots */
238 switch (stats[i].sizeof_stat) {
240 *((*data)++) = *(u64 *)p;
243 *((*data)++) = *(u32 *)p;
246 *((*data)++) = *(u16 *)p;
249 *((*data)++) = *(u8 *)p;
/* fm10k_get_ethtool_stats - ethtool .get_ethtool_stats: fill the stat
 * values in the same order fm10k_get_stat_strings() emits their names.
 */
257 static void fm10k_get_ethtool_stats(struct net_device *netdev,
258 struct ethtool_stats __always_unused *stats,
261 struct fm10k_intfc *interface = netdev_priv(netdev);
262 struct net_device_stats *net_stats = &netdev->stats;
/* refresh counters before reporting */
265 fm10k_update_stats(interface);
267 fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats,
268 FM10K_NETDEV_STATS_LEN);
270 fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats,
271 FM10K_GLOBAL_STATS_LEN);
273 fm10k_add_ethtool_stats(&data, &interface->hw.mbx,
274 fm10k_gstrings_mbx_stats,
275 FM10K_MBX_STATS_LEN);
/* PF-only stats, matching the string table's VF check */
277 if (interface->hw.mac.type != fm10k_mac_vf) {
278 fm10k_add_ethtool_stats(&data, interface,
279 fm10k_gstrings_pf_stats,
283 for (i = 0; i < interface->hw.mac.max_queues; i++) {
284 struct fm10k_ring *ring;
286 ring = interface->tx_ring[i];
287 fm10k_add_ethtool_stats(&data, ring,
288 fm10k_gstrings_queue_stats,
289 FM10K_QUEUE_STATS_LEN);
291 ring = interface->rx_ring[i];
292 fm10k_add_ethtool_stats(&data, ring,
293 fm10k_gstrings_queue_stats,
294 FM10K_QUEUE_STATS_LEN);
298 /* If function below adds more registers this define needs to be updated */
299 #define FM10K_REGS_LEN_Q 29
/* fm10k_get_reg_q - dump the 29 per-queue registers for queue @i into
 * @buff; the BUG_ON at the end keeps FM10K_REGS_LEN_Q honest.
 */
301 static void fm10k_get_reg_q(struct fm10k_hw *hw, u32 *buff, int i)
/* Rx descriptor ring registers */
305 buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAL(i));
306 buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAH(i));
307 buff[idx++] = fm10k_read_reg(hw, FM10K_RDLEN(i));
308 buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_RXCTRL(i));
309 buff[idx++] = fm10k_read_reg(hw, FM10K_RDH(i));
310 buff[idx++] = fm10k_read_reg(hw, FM10K_RDT(i));
311 buff[idx++] = fm10k_read_reg(hw, FM10K_RXQCTL(i));
312 buff[idx++] = fm10k_read_reg(hw, FM10K_RXDCTL(i));
313 buff[idx++] = fm10k_read_reg(hw, FM10K_RXINT(i));
314 buff[idx++] = fm10k_read_reg(hw, FM10K_SRRCTL(i));
/* Rx queue counters */
315 buff[idx++] = fm10k_read_reg(hw, FM10K_QPRC(i));
316 buff[idx++] = fm10k_read_reg(hw, FM10K_QPRDC(i));
317 buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_L(i));
318 buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_H(i));
/* Tx descriptor ring registers */
319 buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAL(i));
320 buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAH(i));
321 buff[idx++] = fm10k_read_reg(hw, FM10K_TDLEN(i));
322 buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_TXCTRL(i));
323 buff[idx++] = fm10k_read_reg(hw, FM10K_TDH(i));
324 buff[idx++] = fm10k_read_reg(hw, FM10K_TDT(i));
325 buff[idx++] = fm10k_read_reg(hw, FM10K_TXDCTL(i));
326 buff[idx++] = fm10k_read_reg(hw, FM10K_TXQCTL(i));
327 buff[idx++] = fm10k_read_reg(hw, FM10K_TXINT(i));
/* Tx queue counters */
328 buff[idx++] = fm10k_read_reg(hw, FM10K_QPTC(i));
329 buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_L(i));
330 buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_H(i));
331 buff[idx++] = fm10k_read_reg(hw, FM10K_TQDLOC(i));
332 buff[idx++] = fm10k_read_reg(hw, FM10K_TX_SGLORT(i));
333 buff[idx++] = fm10k_read_reg(hw, FM10K_PFVTCTL(i));
/* keep FM10K_REGS_LEN_Q in sync with the reads above */
335 BUG_ON(idx != FM10K_REGS_LEN_Q);
338 /* If function above adds more registers this define needs to be updated */
339 #define FM10K_REGS_LEN_VSI 43
/* fm10k_get_reg_vsi - dump the 43 per-VSI registers (MRQC + 10 RSSRK +
 * 32 RETA) for VSI @i into @buff.
 */
341 static void fm10k_get_reg_vsi(struct fm10k_hw *hw, u32 *buff, int i)
345 buff[idx++] = fm10k_read_reg(hw, FM10K_MRQC(i));
346 for (j = 0; j < 10; j++)
347 buff[idx++] = fm10k_read_reg(hw, FM10K_RSSRK(i, j));
348 for (j = 0; j < 32; j++)
349 buff[idx++] = fm10k_read_reg(hw, FM10K_RETA(i, j));
/* keep FM10K_REGS_LEN_VSI in sync with the reads above */
351 BUG_ON(idx != FM10K_REGS_LEN_VSI);
/* fm10k_get_regs - ethtool .get_regs: dump device registers into @p.
 * Layout must match fm10k_get_regs_len() below.
 * NOTE(review): the switch case labels (PF/VF) and buffer setup lines are
 * not visible in this extract.
 */
354 static void fm10k_get_regs(struct net_device *netdev,
355 struct ethtool_regs *regs, void *p)
357 struct fm10k_intfc *interface = netdev_priv(netdev);
358 struct fm10k_hw *hw = &interface->hw;
/* encode a format version plus revision/device id for user tools */
362 regs->version = BIT(24) | (hw->revision_id << 16) | hw->device_id;
364 switch (hw->mac.type) {
366 /* General PF Registers */
367 *(buff++) = fm10k_read_reg(hw, FM10K_CTRL);
368 *(buff++) = fm10k_read_reg(hw, FM10K_CTRL_EXT);
369 *(buff++) = fm10k_read_reg(hw, FM10K_GCR);
370 *(buff++) = fm10k_read_reg(hw, FM10K_GCR_EXT);
372 for (i = 0; i < 8; i++) {
373 *(buff++) = fm10k_read_reg(hw, FM10K_DGLORTMAP(i));
374 *(buff++) = fm10k_read_reg(hw, FM10K_DGLORTDEC(i));
/* 65 VSIs on the PF */
377 for (i = 0; i < 65; i++) {
378 fm10k_get_reg_vsi(hw, buff, i);
379 buff += FM10K_REGS_LEN_VSI;
382 *(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL);
383 *(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
385 for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) {
386 fm10k_get_reg_q(hw, buff, i);
387 buff += FM10K_REGS_LEN_Q;
390 *(buff++) = fm10k_read_reg(hw, FM10K_TPH_CTRL);
392 for (i = 0; i < 8; i++)
393 *(buff++) = fm10k_read_reg(hw, FM10K_INT_MAP(i));
395 /* Interrupt Throttling Registers */
396 for (i = 0; i < 130; i++)
397 *(buff++) = fm10k_read_reg(hw, FM10K_ITR(i));
401 /* General VF registers */
402 *(buff++) = fm10k_read_reg(hw, FM10K_VFCTRL);
403 *(buff++) = fm10k_read_reg(hw, FM10K_VFINT_MAP);
404 *(buff++) = fm10k_read_reg(hw, FM10K_VFSYSTIME);
406 /* Interrupt Throttling Registers */
407 for (i = 0; i < 8; i++)
408 *(buff++) = fm10k_read_reg(hw, FM10K_VFITR(i));
/* a VF owns a single VSI */
410 fm10k_get_reg_vsi(hw, buff, 0);
411 buff += FM10K_REGS_LEN_VSI;
/* zero-fill slots for queues beyond what this VF was granted so the
 * dump length stays fixed at FM10K_MAX_QUEUES_POOL queues
 */
413 for (i = 0; i < FM10K_MAX_QUEUES_POOL; i++) {
414 if (i < hw->mac.max_queues)
415 fm10k_get_reg_q(hw, buff, i);
417 memset(buff, 0, sizeof(u32) * FM10K_REGS_LEN_Q);
418 buff += FM10K_REGS_LEN_Q;
427 /* If function above adds more registers these define need to be updated */
428 #define FM10K_REGS_LEN_PF \
429 (162 + (65 * FM10K_REGS_LEN_VSI) + (FM10K_MAX_QUEUES_PF * FM10K_REGS_LEN_Q))
430 #define FM10K_REGS_LEN_VF \
431 (11 + FM10K_REGS_LEN_VSI + (FM10K_MAX_QUEUES_POOL * FM10K_REGS_LEN_Q))
/* fm10k_get_regs_len - ethtool .get_regs_len: dump size in bytes,
 * keyed on PF vs VF MAC type (case labels not visible here).
 */
433 static int fm10k_get_regs_len(struct net_device *netdev)
435 struct fm10k_intfc *interface = netdev_priv(netdev);
436 struct fm10k_hw *hw = &interface->hw;
438 switch (hw->mac.type) {
440 return FM10K_REGS_LEN_PF * sizeof(u32);
442 return FM10K_REGS_LEN_VF * sizeof(u32);
448 static void fm10k_get_drvinfo(struct net_device *dev,
449 struct ethtool_drvinfo *info)
451 struct fm10k_intfc *interface = netdev_priv(dev);
453 strncpy(info->driver, fm10k_driver_name,
454 sizeof(info->driver) - 1);
455 strncpy(info->version, fm10k_driver_version,
456 sizeof(info->version) - 1);
457 strncpy(info->bus_info, pci_name(interface->pdev),
458 sizeof(info->bus_info) - 1);
/* fm10k_get_pauseparam - ethtool .get_pauseparam: report flow control
 * settings. NOTE(review): the autoneg/tx_pause assignment lines referenced
 * by the comment below are not visible in this extract.
 */
461 static void fm10k_get_pauseparam(struct net_device *dev,
462 struct ethtool_pauseparam *pause)
464 struct fm10k_intfc *interface = netdev_priv(dev);
466 /* record fixed values for autoneg and tx pause */
470 pause->rx_pause = interface->rx_pause ? 1 : 0;
/* fm10k_set_pauseparam - ethtool .set_pauseparam: only Rx pause on the PF
 * is configurable; autoneg and disabling Tx pause are rejected (the
 * return statements for the error paths are not visible here).
 */
473 static int fm10k_set_pauseparam(struct net_device *dev,
474 struct ethtool_pauseparam *pause)
476 struct fm10k_intfc *interface = netdev_priv(dev);
477 struct fm10k_hw *hw = &interface->hw;
479 if (pause->autoneg || !pause->tx_pause)
482 /* we can only support pause on the PF to avoid head-of-line blocking */
483 if (hw->mac.type == fm10k_mac_pf)
484 interface->rx_pause = pause->rx_pause ? ~0 : 0;
485 else if (pause->rx_pause)
/* apply the new drop-enable setting immediately if the device is up */
488 if (netif_running(dev))
489 fm10k_update_rx_drop_en(interface);
494 static u32 fm10k_get_msglevel(struct net_device *netdev)
496 struct fm10k_intfc *interface = netdev_priv(netdev);
498 return interface->msg_enable;
501 static void fm10k_set_msglevel(struct net_device *netdev, u32 data)
503 struct fm10k_intfc *interface = netdev_priv(netdev);
505 interface->msg_enable = data;
508 static void fm10k_get_ringparam(struct net_device *netdev,
509 struct ethtool_ringparam *ring)
511 struct fm10k_intfc *interface = netdev_priv(netdev);
513 ring->rx_max_pending = FM10K_MAX_RXD;
514 ring->tx_max_pending = FM10K_MAX_TXD;
515 ring->rx_mini_max_pending = 0;
516 ring->rx_jumbo_max_pending = 0;
517 ring->rx_pending = interface->rx_ring_count;
518 ring->tx_pending = interface->tx_ring_count;
519 ring->rx_mini_pending = 0;
520 ring->rx_jumbo_pending = 0;
/* fm10k_set_ringparam - ethtool .set_ringparam: resize the Tx/Rx
 * descriptor rings. New resources are allocated into a temporary ring
 * array before the old ones are freed, so a failed allocation leaves the
 * device usable. NOTE(review): several lines (error labels, vmalloc
 * failure check, fm10k_up, vfree) are not visible in this extract.
 */
523 static int fm10k_set_ringparam(struct net_device *netdev,
524 struct ethtool_ringparam *ring)
526 struct fm10k_intfc *interface = netdev_priv(netdev);
527 struct fm10k_ring *temp_ring;
529 u32 new_rx_count, new_tx_count;
/* mini/jumbo rings are not supported */
531 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
/* clamp requested counts to hardware limits and round to the required
 * descriptor multiple
 */
534 new_tx_count = clamp_t(u32, ring->tx_pending,
535 FM10K_MIN_TXD, FM10K_MAX_TXD);
536 new_tx_count = ALIGN(new_tx_count, FM10K_REQ_TX_DESCRIPTOR_MULTIPLE);
538 new_rx_count = clamp_t(u32, ring->rx_pending,
539 FM10K_MIN_RXD, FM10K_MAX_RXD);
540 new_rx_count = ALIGN(new_rx_count, FM10K_REQ_RX_DESCRIPTOR_MULTIPLE);
/* nothing to do if both counts are unchanged */
542 if ((new_tx_count == interface->tx_ring_count) &&
543 (new_rx_count == interface->rx_ring_count)) {
/* serialize against other reset/resize paths */
548 while (test_and_set_bit(__FM10K_RESETTING, interface->state))
549 usleep_range(1000, 2000);
/* if the interface is down, just record the new counts; rings will be
 * allocated at the new size on the next open
 */
551 if (!netif_running(interface->netdev)) {
552 for (i = 0; i < interface->num_tx_queues; i++)
553 interface->tx_ring[i]->count = new_tx_count;
554 for (i = 0; i < interface->num_rx_queues; i++)
555 interface->rx_ring[i]->count = new_rx_count;
556 interface->tx_ring_count = new_tx_count;
557 interface->rx_ring_count = new_rx_count;
561 /* allocate temporary buffer to store rings in */
562 i = max_t(int, interface->num_tx_queues, interface->num_rx_queues);
563 temp_ring = vmalloc(i * sizeof(struct fm10k_ring));
/* bring the device down before swapping ring resources */
570 fm10k_down(interface);
572 /* Setup new Tx resources and free the old Tx resources in that order.
573 * We can then assign the new resources to the rings via a memcpy.
574 * The advantage to this approach is that we are guaranteed to still
575 * have resources even in the case of an allocation failure.
577 if (new_tx_count != interface->tx_ring_count) {
578 for (i = 0; i < interface->num_tx_queues; i++) {
579 memcpy(&temp_ring[i], interface->tx_ring[i],
580 sizeof(struct fm10k_ring));
582 temp_ring[i].count = new_tx_count;
583 err = fm10k_setup_tx_resources(&temp_ring[i]);
/* unwind already-allocated temp Tx rings on failure (error branch;
 * surrounding lines not visible)
 */
587 fm10k_free_tx_resources(&temp_ring[i]);
593 for (i = 0; i < interface->num_tx_queues; i++) {
594 fm10k_free_tx_resources(interface->tx_ring[i]);
596 memcpy(interface->tx_ring[i], &temp_ring[i],
597 sizeof(struct fm10k_ring));
600 interface->tx_ring_count = new_tx_count;
603 /* Repeat the process for the Rx rings if needed */
604 if (new_rx_count != interface->rx_ring_count) {
605 for (i = 0; i < interface->num_rx_queues; i++) {
606 memcpy(&temp_ring[i], interface->rx_ring[i],
607 sizeof(struct fm10k_ring));
609 temp_ring[i].count = new_rx_count;
610 err = fm10k_setup_rx_resources(&temp_ring[i]);
/* unwind already-allocated temp Rx rings on failure */
614 fm10k_free_rx_resources(&temp_ring[i]);
620 for (i = 0; i < interface->num_rx_queues; i++) {
621 fm10k_free_rx_resources(interface->rx_ring[i]);
623 memcpy(interface->rx_ring[i], &temp_ring[i],
624 sizeof(struct fm10k_ring));
627 interface->rx_ring_count = new_rx_count;
/* release the resize lock taken above */
634 clear_bit(__FM10K_RESETTING, interface->state);
/* fm10k_get_coalesce - ethtool .get_coalesce: report ITR settings; the
 * adaptive flag is stored in the same word as the usec value.
 */
638 static int fm10k_get_coalesce(struct net_device *dev,
639 struct ethtool_coalesce *ec)
641 struct fm10k_intfc *interface = netdev_priv(dev);
643 ec->use_adaptive_tx_coalesce = ITR_IS_ADAPTIVE(interface->tx_itr);
/* mask off the adaptive flag to report the raw usec value */
644 ec->tx_coalesce_usecs = interface->tx_itr & ~FM10K_ITR_ADAPTIVE;
646 ec->use_adaptive_rx_coalesce = ITR_IS_ADAPTIVE(interface->rx_itr);
647 ec->rx_coalesce_usecs = interface->rx_itr & ~FM10K_ITR_ADAPTIVE;
/* fm10k_set_coalesce - ethtool .set_coalesce: validate and apply new ITR
 * values, then propagate them to every q_vector. NOTE(review): the
 * per-q_vector assignment lines are not visible in this extract.
 */
652 static int fm10k_set_coalesce(struct net_device *dev,
653 struct ethtool_coalesce *ec)
655 struct fm10k_intfc *interface = netdev_priv(dev);
656 struct fm10k_q_vector *qv;
/* reject out-of-range usec values */
661 if ((ec->rx_coalesce_usecs > FM10K_ITR_MAX) ||
662 (ec->tx_coalesce_usecs > FM10K_ITR_MAX))
665 /* record settings */
666 tx_itr = ec->tx_coalesce_usecs;
667 rx_itr = ec->rx_coalesce_usecs;
669 /* set initial values for adaptive ITR */
670 if (ec->use_adaptive_tx_coalesce)
671 tx_itr = FM10K_ITR_ADAPTIVE | FM10K_TX_ITR_DEFAULT;
673 if (ec->use_adaptive_rx_coalesce)
674 rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;
676 /* update interface */
677 interface->tx_itr = tx_itr;
678 interface->rx_itr = rx_itr;
680 /* update q_vectors */
681 for (i = 0; i < interface->num_q_vectors; i++) {
682 qv = interface->q_vector[i];
/* fm10k_get_rss_hash_opts - report which header fields feed the RSS hash
 * for a given flow type. NOTE(review): most case labels of the flow-type
 * switch are not visible in this extract.
 */
690 static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
691 struct ethtool_rxnfc *cmd)
695 /* Report default options for RSS on fm10k */
696 switch (cmd->flow_type) {
/* L4 port hashing (flow-type labels for this branch not visible) */
699 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
/* UDP/IPv4 port hashing is optional, gated by a private flag */
702 if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
704 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
716 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
/* UDP/IPv6 port hashing likewise gated by its private flag */
719 if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
721 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
722 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
/* fm10k_get_rxnfc - ethtool .get_rxnfc dispatcher (GRXRINGS / hash opts;
 * other case labels not visible here).
 */
731 static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
732 u32 __always_unused *rule_locs)
734 struct fm10k_intfc *interface = netdev_priv(dev);
735 int ret = -EOPNOTSUPP;
738 case ETHTOOL_GRXRINGS:
739 cmd->data = interface->num_rx_queues;
743 ret = fm10k_get_rss_hash_opts(interface, cmd);
/* fm10k_set_rss_hash_opt - validate and apply a requested RSS field
 * configuration; only the UDP port-hashing bits are actually togglable.
 */
752 static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
753 struct ethtool_rxnfc *nfc)
/* snapshot current UDP-hash flags so we can detect a change below */
755 int rss_ipv4_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
757 int rss_ipv6_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
760 /* RSS does not support anything other than hashing
761 * to queues on src and dst IPs and ports
763 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
764 RXH_L4_B_0_1 | RXH_L4_B_2_3))
767 switch (nfc->flow_type) {
/* TCP flows must hash on both IPs and both port halves */
770 if (!(nfc->data & RXH_IP_SRC) ||
771 !(nfc->data & RXH_IP_DST) ||
772 !(nfc->data & RXH_L4_B_0_1) ||
773 !(nfc->data & RXH_L4_B_2_3))
/* UDP/IPv4: IP hashing mandatory, port hashing all-or-nothing */
777 if (!(nfc->data & RXH_IP_SRC) ||
778 !(nfc->data & RXH_IP_DST))
780 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
782 clear_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
785 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
786 set_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
/* UDP/IPv6: same rules as UDP/IPv4 */
794 if (!(nfc->data & RXH_IP_SRC) ||
795 !(nfc->data & RXH_IP_DST))
797 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
799 clear_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
802 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
803 set_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
/* remaining flow types: IP hashing required, port hashing rejected */
818 if (!(nfc->data & RXH_IP_SRC) ||
819 !(nfc->data & RXH_IP_DST) ||
820 (nfc->data & RXH_L4_B_0_1) ||
821 (nfc->data & RXH_L4_B_2_3))
828 /* If something changed we need to update the MRQC register. Note that
829 * test_bit() is guaranteed to return strictly 0 or 1, so testing for
832 if ((rss_ipv4_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
833 interface->flags)) ||
834 (rss_ipv6_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
835 interface->flags))) {
836 struct fm10k_hw *hw = &interface->hw;
840 /* Perform hash on these packet types */
841 mrqc = FM10K_MRQC_IPV4 |
842 FM10K_MRQC_TCP_IPV4 |
846 if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
848 mrqc |= FM10K_MRQC_UDP_IPV4;
851 if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
853 mrqc |= FM10K_MRQC_UDP_IPV6;
857 /* If we enable UDP RSS display a warning that this may cause
858 * fragmented UDP packets to arrive out of order.
861 netif_warn(interface, drv, interface->netdev,
862 "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
864 fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);
/* fm10k_set_rxnfc - ethtool .set_rxnfc dispatcher (hash opts only;
 * switch scaffolding not visible here).
 */
870 static int fm10k_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
872 struct fm10k_intfc *interface = netdev_priv(dev);
873 int ret = -EOPNOTSUPP;
877 ret = fm10k_set_rss_hash_opt(interface, cmd);
/* fm10k_mbx_test - exercise the VF mailbox by sending TLV test messages
 * and waiting up to one second for each reply. Returns 0 on success;
 * *data records the failing attribute flag or a positive result.
 * NOTE(review): some loop/error scaffolding lines are not visible here.
 */
886 static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
888 struct fm10k_hw *hw = &interface->hw;
889 struct fm10k_mbx_info *mbx = &hw->mbx;
890 u32 attr_flag, test_msg[6];
891 unsigned long timeout;
894 /* For now this is a VF only feature */
895 if (hw->mac.type != fm10k_mac_vf)
898 /* loop through both nested and unnested attribute types */
899 for (attr_flag = BIT(FM10K_TEST_MSG_UNSET);
900 attr_flag < BIT(2 * FM10K_TEST_MSG_NESTED);
901 attr_flag += attr_flag) {
902 /* generate message to be tested */
903 fm10k_tlv_msg_test_create(test_msg, attr_flag);
/* enqueue under the mailbox lock; seed test_result so we can tell
 * when a reply has actually been processed
 */
905 fm10k_mbx_lock(interface);
906 mbx->test_result = FM10K_NOT_IMPLEMENTED;
907 err = mbx->ops.enqueue_tx(hw, mbx, test_msg);
908 fm10k_mbx_unlock(interface);
910 /* wait up to 1 second for response */
911 timeout = jiffies + HZ;
916 usleep_range(500, 1000);
918 fm10k_mbx_lock(interface);
919 mbx->ops.process(hw, mbx);
920 fm10k_mbx_unlock(interface);
922 err = mbx->test_result;
925 } while (time_is_after_jiffies(timeout));
927 /* reporting errors */
/* negative err reports which attribute flag failed */
933 *data = err < 0 ? (attr_flag) : (err > 0);
/* fm10k_self_test - ethtool .self_test entry point: run the mailbox test
 * and flag failure, refusing to run if the device has been removed.
 */
937 static void fm10k_self_test(struct net_device *dev,
938 struct ethtool_test *eth_test, u64 *data)
940 struct fm10k_intfc *interface = netdev_priv(dev);
941 struct fm10k_hw *hw = &interface->hw;
943 memset(data, 0, sizeof(*data) * FM10K_TEST_LEN);
945 if (FM10K_REMOVED(hw->hw_addr)) {
946 netif_err(interface, drv, dev,
947 "Interface removed - test blocked\n");
948 eth_test->flags |= ETH_TEST_FL_FAILED;
952 if (fm10k_mbx_test(interface, &data[FM10K_TEST_MBX]))
953 eth_test->flags |= ETH_TEST_FL_FAILED;
/* fm10k_get_priv_flags - ethtool .get_priv_flags (return value line not
 * visible in this extract).
 */
956 static u32 fm10k_get_priv_flags(struct net_device *netdev)
/* fm10k_set_priv_flags - ethtool .set_priv_flags: reject unknown bits */
961 static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
963 if (priv_flags >= BIT(FM10K_PRV_FLAG_LEN))
/* fm10k_get_reta_size - number of RSS indirection table entries (four
 * 8-bit entries packed per 32-bit RETA register).
 */
969 static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
971 return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
/* fm10k_write_reta - pack the indirection table four entries per register
 * and write any registers whose cached value changed.
 * NOTE(review): the packing of n into reta and the rss_i bound check are
 * on lines not visible in this extract.
 */
974 void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
976 u16 rss_i = interface->ring_feature[RING_F_RSS].indices;
977 struct fm10k_hw *hw = &interface->hw;
981 /* record entries to reta table */
982 for (i = 0; i < FM10K_RETA_SIZE; i++) {
985 /* generate a new table if we weren't given one */
986 for (j = 0; j < 4; j++) {
988 n = indir[4 * i + j];
990 n = ethtool_rxfh_indir_default(4 * i + j,
/* skip the register write when the cached value is unchanged */
1001 if (interface->reta[i] == reta)
1004 interface->reta[i] = reta;
1005 fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
/* fm10k_get_reta - unpack the cached RETA into four u32 entries per
 * register; (reta << k) >> 24 isolates one byte at a time.
 */
1009 static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
1011 struct fm10k_intfc *interface = netdev_priv(netdev);
1017 for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
1018 u32 reta = interface->reta[i];
1020 indir[0] = (reta << 24) >> 24;
1021 indir[1] = (reta << 16) >> 24;
1022 indir[2] = (reta << 8) >> 24;
1023 indir[3] = (reta) >> 24;
/* fm10k_set_reta - validate each entry against the active RSS queue
 * count before committing the table (error return not visible here).
 */
1029 static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
1031 struct fm10k_intfc *interface = netdev_priv(netdev);
1038 /* Verify user input. */
1039 rss_i = interface->ring_feature[RING_F_RSS].indices;
1040 for (i = fm10k_get_reta_size(netdev); i--;) {
1041 if (indir[i] < rss_i)
1046 fm10k_write_reta(interface, indir);
/* fm10k_get_rssrk_size - RSS key size in bytes (four bytes packed per
 * 32-bit RSSRK register).
 */
1051 static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev)
1053 return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG;
/* fm10k_get_rssh - ethtool .get_rxfh: report hash function (Toeplitz),
 * indirection table, and the cached RSS key.
 */
1056 static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
1059 struct fm10k_intfc *interface = netdev_priv(netdev);
1063 *hfunc = ETH_RSS_HASH_TOP;
1065 err = fm10k_get_reta(netdev, indir);
/* serialize the key as little-endian 32-bit words */
1069 for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4)
1070 *(__le32 *)key = cpu_to_le32(interface->rssrk[i]);
/* fm10k_set_rssh - ethtool .set_rxfh: only Toeplitz (or no change) is
 * accepted; update the indirection table, then write any changed RSSRK
 * registers.
 */
1075 static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir,
1076 const u8 *key, const u8 hfunc)
1078 struct fm10k_intfc *interface = netdev_priv(netdev);
1079 struct fm10k_hw *hw = &interface->hw;
1082 /* We do not allow change in unsupported parameters */
1083 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
1086 err = fm10k_set_reta(netdev, indir);
1090 for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) {
1091 u32 rssrk = le32_to_cpu(*(__le32 *)key);
/* skip registers whose cached value is unchanged */
1093 if (interface->rssrk[i] == rssrk)
1096 interface->rssrk[i] = rssrk;
1097 fm10k_write_reg(hw, FM10K_RSSRK(0, i), rssrk);
/* fm10k_max_channels - maximum combined channels the device supports,
 * reduced per traffic class when QoS is active. NOTE(review): the
 * condition guarding the per-TC reduction is on a line not visible here.
 */
1103 static unsigned int fm10k_max_channels(struct net_device *dev)
1105 struct fm10k_intfc *interface = netdev_priv(dev);
1106 unsigned int max_combined = interface->hw.mac.max_queues;
1107 u8 tcs = netdev_get_num_tc(dev);
1109 /* For QoS report channels per traffic class */
/* round queues-per-TC down to a power of two */
1111 max_combined = BIT((fls(max_combined / tcs) - 1));
1113 return max_combined;
/* fm10k_get_channels - ethtool .get_channels: report channel limits and
 * the current RSS queue count.
 */
1116 static void fm10k_get_channels(struct net_device *dev,
1117 struct ethtool_channels *ch)
1119 struct fm10k_intfc *interface = netdev_priv(dev);
1120 struct fm10k_hw *hw = &interface->hw;
1122 /* report maximum channels */
1123 ch->max_combined = fm10k_max_channels(dev);
1125 /* report info for other vector */
1126 ch->max_other = NON_Q_VECTORS(hw);
1127 ch->other_count = ch->max_other;
1129 /* record RSS queues */
1130 ch->combined_count = interface->ring_feature[RING_F_RSS].indices;
/* fm10k_set_channels - ethtool .set_channels: only combined channels may
 * change; applies the new RSS limit via the setup_tc path (error returns
 * not visible in this extract).
 */
1133 static int fm10k_set_channels(struct net_device *dev,
1134 struct ethtool_channels *ch)
1136 struct fm10k_intfc *interface = netdev_priv(dev);
1137 unsigned int count = ch->combined_count;
1138 struct fm10k_hw *hw = &interface->hw;
1140 /* verify they are not requesting separate vectors */
1141 if (!count || ch->rx_count || ch->tx_count)
1144 /* verify other_count has not changed */
1145 if (ch->other_count != NON_Q_VECTORS(hw))
1148 /* verify the number of channels does not exceed hardware limits */
1149 if (count > fm10k_max_channels(dev))
1152 interface->ring_feature[RING_F_RSS].limit = count;
1154 /* use setup TC to update any traffic class queue mapping */
1155 return fm10k_setup_tc(dev, netdev_get_num_tc(dev));
/* ethtool operations table wiring the handlers above into the core */
1158 static const struct ethtool_ops fm10k_ethtool_ops = {
1159 .get_strings = fm10k_get_strings,
1160 .get_sset_count = fm10k_get_sset_count,
1161 .get_ethtool_stats = fm10k_get_ethtool_stats,
1162 .get_drvinfo = fm10k_get_drvinfo,
1163 .get_link = ethtool_op_get_link,
1164 .get_pauseparam = fm10k_get_pauseparam,
1165 .set_pauseparam = fm10k_set_pauseparam,
1166 .get_msglevel = fm10k_get_msglevel,
1167 .set_msglevel = fm10k_set_msglevel,
1168 .get_ringparam = fm10k_get_ringparam,
1169 .set_ringparam = fm10k_set_ringparam,
1170 .get_coalesce = fm10k_get_coalesce,
1171 .set_coalesce = fm10k_set_coalesce,
1172 .get_rxnfc = fm10k_get_rxnfc,
1173 .set_rxnfc = fm10k_set_rxnfc,
1174 .get_regs = fm10k_get_regs,
1175 .get_regs_len = fm10k_get_regs_len,
1176 .self_test = fm10k_self_test,
1177 .get_priv_flags = fm10k_get_priv_flags,
1178 .set_priv_flags = fm10k_set_priv_flags,
1179 .get_rxfh_indir_size = fm10k_get_reta_size,
1180 .get_rxfh_key_size = fm10k_get_rssrk_size,
1181 .get_rxfh = fm10k_get_rssh,
1182 .set_rxfh = fm10k_set_rssh,
1183 .get_channels = fm10k_get_channels,
1184 .set_channels = fm10k_set_channels,
1185 .get_ts_info = ethtool_op_get_ts_info,
/* fm10k_set_ethtool_ops - attach the ops table to a netdev at probe */
1188 void fm10k_set_ethtool_ops(struct net_device *dev)
1190 dev->ethtool_ops = &fm10k_ethtool_ops;