1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
4 #include <linux/vmalloc.h>
/* Statistics-table plumbing.
 *
 * NOTE(review): SOURCE is an elided excerpt; the remaining members of
 * struct fm10k_stats (the stat size/offset fields) and several closing
 * braces fall outside the visible lines.
 */
9 /* The stat_string is expected to be a format string formatted using
10 * vsnprintf by fm10k_add_stat_strings. Every member of a stats array
11 * should use the same format specifiers as they will be formatted
12 * using the same variadic arguments.
14 char stat_string[ETH_GSTRING_LEN];
/* Build one fm10k_stats entry: the ethtool display name plus the size
 * and byte offset of the backing field inside _type. */
19 #define FM10K_STAT_FIELDS(_type, _name, _stat) { \
20 .stat_string = _name, \
21 .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
22 .stat_offset = offsetof(_type, _stat) \
25 /* netdevice statistics */
/* The field name doubles as the ethtool string via __stringify() */
26 #define FM10K_NETDEV_STAT(_net_stat) \
27 FM10K_STAT_FIELDS(struct net_device_stats, __stringify(_net_stat), \
/* Counters sourced from the generic struct net_device_stats */
30 static const struct fm10k_stats fm10k_gstrings_net_stats[] = {
31 FM10K_NETDEV_STAT(tx_packets),
32 FM10K_NETDEV_STAT(tx_bytes),
33 FM10K_NETDEV_STAT(tx_errors),
34 FM10K_NETDEV_STAT(rx_packets),
35 FM10K_NETDEV_STAT(rx_bytes),
36 FM10K_NETDEV_STAT(rx_errors),
37 FM10K_NETDEV_STAT(rx_dropped),
39 /* detailed Rx errors */
40 FM10K_NETDEV_STAT(rx_length_errors),
41 FM10K_NETDEV_STAT(rx_crc_errors),
42 FM10K_NETDEV_STAT(rx_fifo_errors),
45 #define FM10K_NETDEV_STATS_LEN ARRAY_SIZE(fm10k_gstrings_net_stats)
47 /* General interface statistics */
/* Entries resolved against struct fm10k_intfc (offset relative to the
 * interface private data) */
48 #define FM10K_STAT(_name, _stat) \
49 FM10K_STAT_FIELDS(struct fm10k_intfc, _name, _stat)
/* Driver/interface-level counters, reported for both PF and VF */
51 static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
52 FM10K_STAT("tx_restart_queue", restart_queue),
53 FM10K_STAT("tx_busy", tx_busy),
54 FM10K_STAT("tx_csum_errors", tx_csum_errors),
55 FM10K_STAT("rx_alloc_failed", alloc_failed),
56 FM10K_STAT("rx_csum_errors", rx_csum_errors),
58 FM10K_STAT("tx_packets_nic", tx_packets_nic),
59 FM10K_STAT("tx_bytes_nic", tx_bytes_nic),
60 FM10K_STAT("rx_packets_nic", rx_packets_nic),
61 FM10K_STAT("rx_bytes_nic", rx_bytes_nic),
62 FM10K_STAT("rx_drops_nic", rx_drops_nic),
63 FM10K_STAT("rx_overrun_pf", rx_overrun_pf),
64 FM10K_STAT("rx_overrun_vf", rx_overrun_vf),
66 FM10K_STAT("swapi_status", hw.swapi.status),
67 FM10K_STAT("mac_rules_used", hw.swapi.mac.used),
68 FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),
70 FM10K_STAT("reset_while_pending", hw.mac.reset_while_pending),
72 FM10K_STAT("tx_hang_count", tx_timeout_count),
/* PF-only drop/error counters; fm10k_get_stat_strings and
 * fm10k_get_sset_count skip these when hw.mac.type is fm10k_mac_vf */
75 static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
76 FM10K_STAT("timeout", stats.timeout.count),
77 FM10K_STAT("ur", stats.ur.count),
78 FM10K_STAT("ca", stats.ca.count),
79 FM10K_STAT("um", stats.um.count),
80 FM10K_STAT("xec", stats.xec.count),
81 FM10K_STAT("vlan_drop", stats.vlan_drop.count),
82 FM10K_STAT("loopback_drop", stats.loopback_drop.count),
83 FM10K_STAT("nodesc_drop", stats.nodesc_drop.count),
86 /* mailbox statistics */
/* Entries resolved against struct fm10k_mbx_info (caller passes
 * &interface->hw.mbx as the base pointer) */
87 #define FM10K_MBX_STAT(_name, _stat) \
88 FM10K_STAT_FIELDS(struct fm10k_mbx_info, _name, _stat)
90 static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
91 FM10K_MBX_STAT("mbx_tx_busy", tx_busy),
92 FM10K_MBX_STAT("mbx_tx_dropped", tx_dropped),
93 FM10K_MBX_STAT("mbx_tx_messages", tx_messages),
94 FM10K_MBX_STAT("mbx_tx_dwords", tx_dwords),
95 FM10K_MBX_STAT("mbx_tx_mbmem_pulled", tx_mbmem_pulled),
96 FM10K_MBX_STAT("mbx_rx_messages", rx_messages),
97 FM10K_MBX_STAT("mbx_rx_dwords", rx_dwords),
98 FM10K_MBX_STAT("mbx_rx_parse_err", rx_parse_err),
99 FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed),
102 /* per-queue ring statistics */
/* Names here are vsnprintf formats ("%s" = tx/rx, "%u" = queue index),
 * expanded per-ring by fm10k_add_stat_strings' variadic arguments */
103 #define FM10K_QUEUE_STAT(_name, _stat) \
104 FM10K_STAT_FIELDS(struct fm10k_ring, _name, _stat)
106 static const struct fm10k_stats fm10k_gstrings_queue_stats[] = {
107 FM10K_QUEUE_STAT("%s_queue_%u_packets", stats.packets),
108 FM10K_QUEUE_STAT("%s_queue_%u_bytes", stats.bytes),
111 #define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
112 #define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats)
113 #define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats)
114 #define FM10K_QUEUE_STATS_LEN ARRAY_SIZE(fm10k_gstrings_queue_stats)
/* Stats that are always present regardless of MAC type or queue count */
116 #define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
117 FM10K_NETDEV_STATS_LEN + \
/* Self-test names reported for ETH_SS_TEST */
120 static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = {
121 "Mailbox test (on/offline)"
124 #define FM10K_TEST_LEN (sizeof(fm10k_gstrings_test) / ETH_GSTRING_LEN)
126 enum fm10k_self_test_types {
128 FM10K_TEST_MAX = FM10K_TEST_LEN
135 static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
/* Emit one ETH_GSTRING_LEN-sized string per entry of stats[], treating
 * each stat_string as a vsnprintf format fed the trailing variadic
 * arguments; *p is advanced past every string written.
 * NOTE(review): the va_list declaration and va_end() are in elided
 * lines — confirm against the full file. */
138 static void __fm10k_add_stat_strings(u8 **p, const struct fm10k_stats stats[],
139 const unsigned int size, ...)
143 for (i = 0; i < size; i++) {
/* va_start is re-run for every entry so each format sees the
 * same variadic arguments */
146 va_start(args, size);
147 vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
148 *p += ETH_GSTRING_LEN;
/* Convenience wrapper that supplies ARRAY_SIZE() automatically */
153 #define fm10k_add_stat_strings(p, stats, ...) \
154 __fm10k_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
/* Fill the ETH_SS_STATS string table. The emission order here must
 * stay in lock-step with fm10k_get_ethtool_stats() below. */
156 static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
158 struct fm10k_intfc *interface = netdev_priv(dev);
161 fm10k_add_stat_strings(&data, fm10k_gstrings_net_stats);
163 fm10k_add_stat_strings(&data, fm10k_gstrings_global_stats);
165 fm10k_add_stat_strings(&data, fm10k_gstrings_mbx_stats);
/* PF-only strings; must mirror the same check in fm10k_get_sset_count */
167 if (interface->hw.mac.type != fm10k_mac_vf)
168 fm10k_add_stat_strings(&data, fm10k_gstrings_pf_stats);
/* Two passes per queue (tx then rx); the variadic args that name the
 * direction and index are in elided lines — confirm against full file */
170 for (i = 0; i < interface->hw.mac.max_queues; i++) {
171 fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats,
174 fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats,
/* ethtool .get_strings: dispatch on stringset (switch/case lines are
 * elided between the visible lines) */
179 static void fm10k_get_strings(struct net_device *dev,
180 u32 stringset, u8 *data)
184 memcpy(data, fm10k_gstrings_test,
185 FM10K_TEST_LEN * ETH_GSTRING_LEN);
188 fm10k_get_stat_strings(dev, data);
190 case ETH_SS_PRIV_FLAGS:
191 memcpy(data, fm10k_prv_flags,
192 FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN);
/* ethtool .get_sset_count: number of strings per stringset. The stats
 * count computed here must match what fm10k_get_stat_strings emits. */
197 static int fm10k_get_sset_count(struct net_device *dev, int sset)
199 struct fm10k_intfc *interface = netdev_priv(dev);
200 struct fm10k_hw *hw = &interface->hw;
201 int stats_len = FM10K_STATIC_STATS_LEN;
205 return FM10K_TEST_LEN;
/* "* 2" because each queue contributes a tx and an rx set */
207 stats_len += hw->mac.max_queues * 2 * FM10K_QUEUE_STATS_LEN;
209 if (hw->mac.type != fm10k_mac_vf)
210 stats_len += FM10K_PF_STATS_LEN;
213 case ETH_SS_PRIV_FLAGS:
214 return FM10K_PRV_FLAG_LEN;
/* Copy each stat described by stats[] out of *pointer into the u64
 * output array, widening from the field's actual size. *data is
 * advanced one u64 per stat. */
220 static void __fm10k_add_ethtool_stats(u64 **data, void *pointer,
221 const struct fm10k_stats stats[],
222 const unsigned int size)
/* NOTE(review): the visible first loop appears to zero the output
 * (per the comment below) for the case where the source pointer is
 * unavailable; its body and guard are elided — confirm. */
228 /* memory is not zero allocated so we have to clear it */
229 for (i = 0; i < size; i++)
234 for (i = 0; i < size; i++) {
235 p = (char *)pointer + stats[i].stat_offset;
/* widen 1/2/4/8-byte fields to u64; anything else is a table bug */
237 switch (stats[i].sizeof_stat) {
239 *((*data)++) = *(u64 *)p;
242 *((*data)++) = *(u32 *)p;
245 *((*data)++) = *(u16 *)p;
248 *((*data)++) = *(u8 *)p;
251 WARN_ONCE(1, "unexpected stat size for %s",
252 stats[i].stat_string);
/* Convenience wrapper that supplies ARRAY_SIZE() automatically */
258 #define fm10k_add_ethtool_stats(data, pointer, stats) \
259 __fm10k_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
/* ethtool .get_ethtool_stats: emit stat values in exactly the order
 * the strings were laid out by fm10k_get_stat_strings(). */
261 static void fm10k_get_ethtool_stats(struct net_device *netdev,
262 struct ethtool_stats __always_unused *stats,
265 struct fm10k_intfc *interface = netdev_priv(netdev);
266 struct net_device_stats *net_stats = &netdev->stats;
/* refresh counters from hardware before reading them out */
269 fm10k_update_stats(interface);
271 fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats);
273 fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats);
275 fm10k_add_ethtool_stats(&data, &interface->hw.mbx,
276 fm10k_gstrings_mbx_stats);
/* PF-only stats; gated identically in strings/sset_count paths */
278 if (interface->hw.mac.type != fm10k_mac_vf) {
279 fm10k_add_ethtool_stats(&data, interface,
280 fm10k_gstrings_pf_stats);
/* per-queue values: tx ring first, then rx ring, matching the
 * tx/rx string order */
283 for (i = 0; i < interface->hw.mac.max_queues; i++) {
284 struct fm10k_ring *ring;
286 ring = interface->tx_ring[i];
287 fm10k_add_ethtool_stats(&data, ring,
288 fm10k_gstrings_queue_stats);
290 ring = interface->rx_ring[i];
291 fm10k_add_ethtool_stats(&data, ring,
292 fm10k_gstrings_queue_stats);
296 /* If function below adds more registers this define needs to be updated */
297 #define FM10K_REGS_LEN_Q 29
/* Dump the 29 per-queue registers for queue i into buff; the trailing
 * BUG_ON keeps FM10K_REGS_LEN_Q honest against the write count. */
299 static void fm10k_get_reg_q(struct fm10k_hw *hw, u32 *buff, int i)
/* Rx descriptor ring registers */
303 buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAL(i));
304 buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAH(i));
305 buff[idx++] = fm10k_read_reg(hw, FM10K_RDLEN(i));
306 buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_RXCTRL(i));
307 buff[idx++] = fm10k_read_reg(hw, FM10K_RDH(i));
308 buff[idx++] = fm10k_read_reg(hw, FM10K_RDT(i));
309 buff[idx++] = fm10k_read_reg(hw, FM10K_RXQCTL(i));
310 buff[idx++] = fm10k_read_reg(hw, FM10K_RXDCTL(i));
311 buff[idx++] = fm10k_read_reg(hw, FM10K_RXINT(i));
312 buff[idx++] = fm10k_read_reg(hw, FM10K_SRRCTL(i));
/* Rx stats counters */
313 buff[idx++] = fm10k_read_reg(hw, FM10K_QPRC(i));
314 buff[idx++] = fm10k_read_reg(hw, FM10K_QPRDC(i));
315 buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_L(i));
316 buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_H(i));
/* Tx descriptor ring registers */
317 buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAL(i));
318 buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAH(i));
319 buff[idx++] = fm10k_read_reg(hw, FM10K_TDLEN(i));
320 buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_TXCTRL(i));
321 buff[idx++] = fm10k_read_reg(hw, FM10K_TDH(i));
322 buff[idx++] = fm10k_read_reg(hw, FM10K_TDT(i));
323 buff[idx++] = fm10k_read_reg(hw, FM10K_TXDCTL(i));
324 buff[idx++] = fm10k_read_reg(hw, FM10K_TXQCTL(i));
325 buff[idx++] = fm10k_read_reg(hw, FM10K_TXINT(i));
/* Tx stats counters */
326 buff[idx++] = fm10k_read_reg(hw, FM10K_QPTC(i));
327 buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_L(i));
328 buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_H(i));
329 buff[idx++] = fm10k_read_reg(hw, FM10K_TQDLOC(i));
330 buff[idx++] = fm10k_read_reg(hw, FM10K_TX_SGLORT(i));
331 buff[idx++] = fm10k_read_reg(hw, FM10K_PFVTCTL(i));
333 BUG_ON(idx != FM10K_REGS_LEN_Q);
336 /* If function above adds more registers this define needs to be updated */
/* 43 = 1 MRQC + 10 RSSRK + 32 RETA registers per VSI */
337 #define FM10K_REGS_LEN_VSI 43
/* Dump the RSS-related registers for VSI i into buff; the BUG_ON keeps
 * FM10K_REGS_LEN_VSI honest against the write count. */
339 static void fm10k_get_reg_vsi(struct fm10k_hw *hw, u32 *buff, int i)
343 buff[idx++] = fm10k_read_reg(hw, FM10K_MRQC(i));
344 for (j = 0; j < 10; j++)
345 buff[idx++] = fm10k_read_reg(hw, FM10K_RSSRK(i, j));
346 for (j = 0; j < 32; j++)
347 buff[idx++] = fm10k_read_reg(hw, FM10K_RETA(i, j));
349 BUG_ON(idx != FM10K_REGS_LEN_VSI);
/* ethtool .get_regs: dump device registers for ethtool -d. The layout
 * and counts here must match FM10K_REGS_LEN_PF/FM10K_REGS_LEN_VF in
 * fm10k_get_regs_len() below. */
352 static void fm10k_get_regs(struct net_device *netdev,
353 struct ethtool_regs *regs, void *p)
355 struct fm10k_intfc *interface = netdev_priv(netdev);
356 struct fm10k_hw *hw = &interface->hw;
/* version encodes a format marker plus revision and device id */
360 regs->version = BIT(24) | (hw->revision_id << 16) | hw->device_id;
362 switch (hw->mac.type) {
364 /* General PF Registers */
365 *(buff++) = fm10k_read_reg(hw, FM10K_CTRL);
366 *(buff++) = fm10k_read_reg(hw, FM10K_CTRL_EXT);
367 *(buff++) = fm10k_read_reg(hw, FM10K_GCR);
368 *(buff++) = fm10k_read_reg(hw, FM10K_GCR_EXT);
370 for (i = 0; i < 8; i++) {
371 *(buff++) = fm10k_read_reg(hw, FM10K_DGLORTMAP(i));
372 *(buff++) = fm10k_read_reg(hw, FM10K_DGLORTDEC(i));
/* all 65 VSIs on the PF */
375 for (i = 0; i < 65; i++) {
376 fm10k_get_reg_vsi(hw, buff, i);
377 buff += FM10K_REGS_LEN_VSI;
380 *(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL);
381 *(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
383 for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) {
384 fm10k_get_reg_q(hw, buff, i);
385 buff += FM10K_REGS_LEN_Q;
388 *(buff++) = fm10k_read_reg(hw, FM10K_TPH_CTRL);
390 for (i = 0; i < 8; i++)
391 *(buff++) = fm10k_read_reg(hw, FM10K_INT_MAP(i));
393 /* Interrupt Throttling Registers */
394 for (i = 0; i < 130; i++)
395 *(buff++) = fm10k_read_reg(hw, FM10K_ITR(i));
399 /* General VF registers */
400 *(buff++) = fm10k_read_reg(hw, FM10K_VFCTRL);
401 *(buff++) = fm10k_read_reg(hw, FM10K_VFINT_MAP);
402 *(buff++) = fm10k_read_reg(hw, FM10K_VFSYSTIME);
404 /* Interrupt Throttling Registers */
405 for (i = 0; i < 8; i++)
406 *(buff++) = fm10k_read_reg(hw, FM10K_VFITR(i));
/* a VF owns a single VSI */
408 fm10k_get_reg_vsi(hw, buff, 0);
409 buff += FM10K_REGS_LEN_VSI;
/* queues beyond max_queues are zero-filled so the dump length
 * stays fixed at FM10K_MAX_QUEUES_POOL entries */
411 for (i = 0; i < FM10K_MAX_QUEUES_POOL; i++) {
412 if (i < hw->mac.max_queues)
413 fm10k_get_reg_q(hw, buff, i);
415 memset(buff, 0, sizeof(u32) * FM10K_REGS_LEN_Q);
416 buff += FM10K_REGS_LEN_Q;
425 /* If function above adds more registers these define need to be updated */
426 #define FM10K_REGS_LEN_PF \
427 (162 + (65 * FM10K_REGS_LEN_VSI) + (FM10K_MAX_QUEUES_PF * FM10K_REGS_LEN_Q))
428 #define FM10K_REGS_LEN_VF \
429 (11 + FM10K_REGS_LEN_VSI + (FM10K_MAX_QUEUES_POOL * FM10K_REGS_LEN_Q))
/* ethtool .get_regs_len: buffer size in bytes for fm10k_get_regs() */
431 static int fm10k_get_regs_len(struct net_device *netdev)
433 struct fm10k_intfc *interface = netdev_priv(netdev);
434 struct fm10k_hw *hw = &interface->hw;
436 switch (hw->mac.type) {
438 return FM10K_REGS_LEN_PF * sizeof(u32);
440 return FM10K_REGS_LEN_VF * sizeof(u32);
/* ethtool .get_drvinfo: report driver name/version and PCI bus info.
 * NOTE(review): strncpy with size-1 does not itself NUL-terminate when
 * the source fills the buffer; this relies on the caller handing in a
 * zeroed struct ethtool_drvinfo. Upstream kernels later converted
 * these to strscpy() — consider the same here (TODO confirm against
 * the kernel version this file targets). */
446 static void fm10k_get_drvinfo(struct net_device *dev,
447 struct ethtool_drvinfo *info)
449 struct fm10k_intfc *interface = netdev_priv(dev);
451 strncpy(info->driver, fm10k_driver_name,
452 sizeof(info->driver) - 1);
453 strncpy(info->version, fm10k_driver_version,
454 sizeof(info->version) - 1);
455 strncpy(info->bus_info, pci_name(interface->pdev),
456 sizeof(info->bus_info) - 1);
/* ethtool .get_pauseparam: rx pause reflects the stored rx_pause mask;
 * autoneg/tx values are fixed (set in elided lines per the comment). */
459 static void fm10k_get_pauseparam(struct net_device *dev,
460 struct ethtool_pauseparam *pause)
462 struct fm10k_intfc *interface = netdev_priv(dev);
464 /* record fixed values for autoneg and tx pause */
468 pause->rx_pause = interface->rx_pause ? 1 : 0;
/* ethtool .set_pauseparam: autoneg is unsupported and tx pause is
 * mandatory; rx pause is PF-only. */
471 static int fm10k_set_pauseparam(struct net_device *dev,
472 struct ethtool_pauseparam *pause)
474 struct fm10k_intfc *interface = netdev_priv(dev);
475 struct fm10k_hw *hw = &interface->hw;
477 if (pause->autoneg || !pause->tx_pause)
480 /* we can only support pause on the PF to avoid head-of-line blocking */
481 if (hw->mac.type == fm10k_mac_pf)
482 interface->rx_pause = pause->rx_pause ? ~0 : 0;
483 else if (pause->rx_pause)
/* push the new drop-enable setting to the rings if the device is up */
486 if (netif_running(dev))
487 fm10k_update_rx_drop_en(interface);
/* ethtool .get_msglevel / .set_msglevel: plain accessors for the
 * driver's netif message-enable bitmask */
492 static u32 fm10k_get_msglevel(struct net_device *netdev)
494 struct fm10k_intfc *interface = netdev_priv(netdev);
496 return interface->msg_enable;
499 static void fm10k_set_msglevel(struct net_device *netdev, u32 data)
501 struct fm10k_intfc *interface = netdev_priv(netdev);
503 interface->msg_enable = data;
/* ethtool .get_ringparam: report fixed hardware limits and the current
 * ring sizes; mini/jumbo rings are not supported */
506 static void fm10k_get_ringparam(struct net_device *netdev,
507 struct ethtool_ringparam *ring)
509 struct fm10k_intfc *interface = netdev_priv(netdev);
511 ring->rx_max_pending = FM10K_MAX_RXD;
512 ring->tx_max_pending = FM10K_MAX_TXD;
513 ring->rx_mini_max_pending = 0;
514 ring->rx_jumbo_max_pending = 0;
515 ring->rx_pending = interface->rx_ring_count;
516 ring->tx_pending = interface->tx_ring_count;
517 ring->rx_mini_pending = 0;
518 ring->rx_jumbo_pending = 0;
/* ethtool .set_ringparam: resize Tx/Rx descriptor rings.
 * Strategy: clamp+align the request, bail if nothing changed, then
 * (if the device is up) allocate replacement rings in a temporary
 * buffer before freeing the old ones, so an allocation failure leaves
 * the old rings intact.
 * NOTE(review): the error-handling/goto paths and the final vfree of
 * temp_ring are in elided lines — confirm against the full file. */
521 static int fm10k_set_ringparam(struct net_device *netdev,
522 struct ethtool_ringparam *ring)
524 struct fm10k_intfc *interface = netdev_priv(netdev);
525 struct fm10k_ring *temp_ring;
527 u32 new_rx_count, new_tx_count;
/* mini/jumbo rings are not supported */
529 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
/* clamp to hardware limits and round up to the descriptor multiple */
532 new_tx_count = clamp_t(u32, ring->tx_pending,
533 FM10K_MIN_TXD, FM10K_MAX_TXD);
534 new_tx_count = ALIGN(new_tx_count, FM10K_REQ_TX_DESCRIPTOR_MULTIPLE);
536 new_rx_count = clamp_t(u32, ring->rx_pending,
537 FM10K_MIN_RXD, FM10K_MAX_RXD);
538 new_rx_count = ALIGN(new_rx_count, FM10K_REQ_RX_DESCRIPTOR_MULTIPLE);
/* nothing to do if both counts are already at the requested size */
540 if ((new_tx_count == interface->tx_ring_count) &&
541 (new_rx_count == interface->rx_ring_count)) {
/* serialize against resets; poll until we own the RESETTING bit */
546 while (test_and_set_bit(__FM10K_RESETTING, interface->state))
547 usleep_range(1000, 2000);
/* device down: no descriptor memory to reallocate, just record the
 * new counts for the next open */
549 if (!netif_running(interface->netdev)) {
550 for (i = 0; i < interface->num_tx_queues; i++)
551 interface->tx_ring[i]->count = new_tx_count;
552 for (i = 0; i < interface->num_rx_queues; i++)
553 interface->rx_ring[i]->count = new_rx_count;
554 interface->tx_ring_count = new_tx_count;
555 interface->rx_ring_count = new_rx_count;
559 /* allocate temporary buffer to store rings in */
560 i = max_t(int, interface->num_tx_queues, interface->num_rx_queues);
561 temp_ring = vmalloc(array_size(i, sizeof(struct fm10k_ring)));
/* stop traffic while rings are swapped */
568 fm10k_down(interface);
570 /* Setup new Tx resources and free the old Tx resources in that order.
571 * We can then assign the new resources to the rings via a memcpy.
572 * The advantage to this approach is that we are guaranteed to still
573 * have resources even in the case of an allocation failure.
575 if (new_tx_count != interface->tx_ring_count) {
576 for (i = 0; i < interface->num_tx_queues; i++) {
577 memcpy(&temp_ring[i], interface->tx_ring[i],
578 sizeof(struct fm10k_ring));
580 temp_ring[i].count = new_tx_count;
581 err = fm10k_setup_tx_resources(&temp_ring[i]);
/* unwind partially-allocated temp rings on failure */
585 fm10k_free_tx_resources(&temp_ring[i]);
/* all new Tx rings allocated; retire the old ones and adopt the new */
591 for (i = 0; i < interface->num_tx_queues; i++) {
592 fm10k_free_tx_resources(interface->tx_ring[i]);
594 memcpy(interface->tx_ring[i], &temp_ring[i],
595 sizeof(struct fm10k_ring));
598 interface->tx_ring_count = new_tx_count;
601 /* Repeat the process for the Rx rings if needed */
602 if (new_rx_count != interface->rx_ring_count) {
603 for (i = 0; i < interface->num_rx_queues; i++) {
604 memcpy(&temp_ring[i], interface->rx_ring[i],
605 sizeof(struct fm10k_ring));
607 temp_ring[i].count = new_rx_count;
608 err = fm10k_setup_rx_resources(&temp_ring[i]);
612 fm10k_free_rx_resources(&temp_ring[i]);
618 for (i = 0; i < interface->num_rx_queues; i++) {
619 fm10k_free_rx_resources(interface->rx_ring[i]);
621 memcpy(interface->rx_ring[i], &temp_ring[i],
622 sizeof(struct fm10k_ring));
625 interface->rx_ring_count = new_rx_count;
/* release the reset lock taken above */
632 clear_bit(__FM10K_RESETTING, interface->state);
/* ethtool .get_coalesce: report ITR settings; the adaptive flag is
 * stored in-band in the itr value via FM10K_ITR_ADAPTIVE, so it is
 * masked off before reporting the usecs value */
636 static int fm10k_get_coalesce(struct net_device *dev,
637 struct ethtool_coalesce *ec)
639 struct fm10k_intfc *interface = netdev_priv(dev);
641 ec->use_adaptive_tx_coalesce = ITR_IS_ADAPTIVE(interface->tx_itr);
642 ec->tx_coalesce_usecs = interface->tx_itr & ~FM10K_ITR_ADAPTIVE;
644 ec->use_adaptive_rx_coalesce = ITR_IS_ADAPTIVE(interface->rx_itr);
645 ec->rx_coalesce_usecs = interface->rx_itr & ~FM10K_ITR_ADAPTIVE;
/* ethtool .set_coalesce: validate, store, and push the new ITR values
 * to every q_vector (the per-vector writes are in elided lines) */
650 static int fm10k_set_coalesce(struct net_device *dev,
651 struct ethtool_coalesce *ec)
653 struct fm10k_intfc *interface = netdev_priv(dev);
654 struct fm10k_q_vector *qv;
/* reject values the hardware cannot represent */
659 if ((ec->rx_coalesce_usecs > FM10K_ITR_MAX) ||
660 (ec->tx_coalesce_usecs > FM10K_ITR_MAX))
663 /* record settings */
664 tx_itr = ec->tx_coalesce_usecs;
665 rx_itr = ec->rx_coalesce_usecs;
667 /* set initial values for adaptive ITR */
668 if (ec->use_adaptive_tx_coalesce)
669 tx_itr = FM10K_ITR_ADAPTIVE | FM10K_TX_ITR_DEFAULT;
671 if (ec->use_adaptive_rx_coalesce)
672 rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;
674 /* update interface */
675 interface->tx_itr = tx_itr;
676 interface->rx_itr = rx_itr;
678 /* update q_vectors */
679 for (i = 0; i < interface->num_q_vectors; i++) {
680 qv = interface->q_vector[i];
/* Report which header fields feed the RSS hash for cmd->flow_type.
 * IP src/dst always hash; L4 ports hash for TCP always and for UDP
 * only when the corresponding RSS_FIELD flag bit is set. The case
 * labels between the visible lines are elided. */
688 static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
689 struct ethtool_rxnfc *cmd)
693 /* Report default options for RSS on fm10k */
694 switch (cmd->flow_type) {
697 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
700 if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
702 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
714 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
717 if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
719 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
720 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
/* ethtool .get_rxnfc: ring count query and RSS hash-field query */
729 static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
730 u32 __always_unused *rule_locs)
732 struct fm10k_intfc *interface = netdev_priv(dev);
733 int ret = -EOPNOTSUPP;
736 case ETHTOOL_GRXRINGS:
737 cmd->data = interface->num_rx_queues;
741 ret = fm10k_get_rss_hash_opts(interface, cmd);
750 static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
751 struct ethtool_rxnfc *nfc)
753 int rss_ipv4_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
755 int rss_ipv6_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
758 /* RSS does not support anything other than hashing
759 * to queues on src and dst IPs and ports
761 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
762 RXH_L4_B_0_1 | RXH_L4_B_2_3))
765 switch (nfc->flow_type) {
768 if (!(nfc->data & RXH_IP_SRC) ||
769 !(nfc->data & RXH_IP_DST) ||
770 !(nfc->data & RXH_L4_B_0_1) ||
771 !(nfc->data & RXH_L4_B_2_3))
775 if (!(nfc->data & RXH_IP_SRC) ||
776 !(nfc->data & RXH_IP_DST))
778 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
780 clear_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
783 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
784 set_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
792 if (!(nfc->data & RXH_IP_SRC) ||
793 !(nfc->data & RXH_IP_DST))
795 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
797 clear_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
800 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
801 set_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
816 if (!(nfc->data & RXH_IP_SRC) ||
817 !(nfc->data & RXH_IP_DST) ||
818 (nfc->data & RXH_L4_B_0_1) ||
819 (nfc->data & RXH_L4_B_2_3))
826 /* If something changed we need to update the MRQC register. Note that
827 * test_bit() is guaranteed to return strictly 0 or 1, so testing for
830 if ((rss_ipv4_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
831 interface->flags)) ||
832 (rss_ipv6_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
833 interface->flags))) {
834 struct fm10k_hw *hw = &interface->hw;
838 /* Perform hash on these packet types */
839 mrqc = FM10K_MRQC_IPV4 |
840 FM10K_MRQC_TCP_IPV4 |
844 if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
846 mrqc |= FM10K_MRQC_UDP_IPV4;
849 if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
851 mrqc |= FM10K_MRQC_UDP_IPV6;
855 /* If we enable UDP RSS display a warning that this may cause
856 * fragmented UDP packets to arrive out of order.
859 netif_warn(interface, drv, interface->netdev,
860 "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
862 fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);
868 static int fm10k_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
870 struct fm10k_intfc *interface = netdev_priv(dev);
871 int ret = -EOPNOTSUPP;
875 ret = fm10k_set_rss_hash_opt(interface, cmd);
/* Mailbox self-test (VF only): enqueue a TLV test message for every
 * attribute flag combination, poll the mailbox for up to one second
 * per message, and record the result in *data (attr_flag on error,
 * 0/1 on completion). */
884 static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
886 struct fm10k_hw *hw = &interface->hw;
887 struct fm10k_mbx_info *mbx = &hw->mbx;
888 u32 attr_flag, test_msg[6];
889 unsigned long timeout;
892 /* For now this is a VF only feature */
893 if (hw->mac.type != fm10k_mac_vf)
896 /* loop through both nested and unnested attribute types */
/* attr_flag += attr_flag doubles the flag each pass, i.e. walks
 * one bit position at a time */
897 for (attr_flag = BIT(FM10K_TEST_MSG_UNSET);
898 attr_flag < BIT(2 * FM10K_TEST_MSG_NESTED);
899 attr_flag += attr_flag) {
900 /* generate message to be tested */
901 fm10k_tlv_msg_test_create(test_msg, attr_flag);
/* mark result as pending, then hand the message to the mailbox
 * under the mbx lock */
903 fm10k_mbx_lock(interface);
904 mbx->test_result = FM10K_NOT_IMPLEMENTED;
905 err = mbx->ops.enqueue_tx(hw, mbx, test_msg);
906 fm10k_mbx_unlock(interface);
908 /* wait up to 1 second for response */
909 timeout = jiffies + HZ;
914 usleep_range(500, 1000);
916 fm10k_mbx_lock(interface);
917 mbx->ops.process(hw, mbx);
918 fm10k_mbx_unlock(interface);
920 err = mbx->test_result;
923 } while (time_is_after_jiffies(timeout));
925 /* reporting errors */
931 *data = err < 0 ? (attr_flag) : (err > 0);
/* ethtool .self_test entry point: run the mailbox test unless the
 * device has been surprise-removed */
935 static void fm10k_self_test(struct net_device *dev,
936 struct ethtool_test *eth_test, u64 *data)
938 struct fm10k_intfc *interface = netdev_priv(dev);
939 struct fm10k_hw *hw = &interface->hw;
941 memset(data, 0, sizeof(*data) * FM10K_TEST_LEN);
943 if (FM10K_REMOVED(hw->hw_addr)) {
944 netif_err(interface, drv, dev,
945 "Interface removed - test blocked\n");
946 eth_test->flags |= ETH_TEST_FL_FAILED;
950 if (fm10k_mbx_test(interface, &data[FM10K_TEST_MBX]))
951 eth_test->flags |= ETH_TEST_FL_FAILED;
/* ethtool private-flag accessors (flag storage/readout bodies are in
 * elided lines) and the RSS redirection-table size query */
954 static u32 fm10k_get_priv_flags(struct net_device *netdev)
/* reject any bit outside the defined private-flag range */
959 static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
961 if (priv_flags >= BIT(FM10K_PRV_FLAG_LEN))
/* total RETA entries = registers * entries packed per register */
967 static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
969 return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
/* Program the RSS redirection table. Each RETA register packs four
 * 8-bit queue indices; when indir is NULL a default table is generated
 * with ethtool_rxfh_indir_default(). Registers are only rewritten when
 * the cached value actually changed. */
972 void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
974 u16 rss_i = interface->ring_feature[RING_F_RSS].indices;
975 struct fm10k_hw *hw = &interface->hw;
979 /* record entries to reta table */
980 for (i = 0; i < FM10K_RETA_SIZE; i++) {
983 /* generate a new table if we weren't given one */
984 for (j = 0; j < 4; j++) {
986 n = indir[4 * i + j];
988 n = ethtool_rxfh_indir_default(4 * i + j,
/* skip the register write when the packed value is unchanged */
999 if (interface->reta[i] == reta)
1002 interface->reta[i] = reta;
1003 fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
/* Unpack the cached RETA registers into one u32 per entry for
 * ethtool (the shift pairs isolate each 8-bit lane) */
1007 static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
1009 struct fm10k_intfc *interface = netdev_priv(netdev);
1015 for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
1016 u32 reta = interface->reta[i];
1018 indir[0] = (reta << 24) >> 24;
1019 indir[1] = (reta << 16) >> 24;
1020 indir[2] = (reta << 8) >> 24;
1021 indir[3] = (reta) >> 24;
/* Validate that every requested entry targets an active RSS queue,
 * then program the table via fm10k_write_reta() */
1027 static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
1029 struct fm10k_intfc *interface = netdev_priv(netdev);
1036 /* Verify user input. */
1037 rss_i = interface->ring_feature[RING_F_RSS].indices;
1038 for (i = fm10k_get_reta_size(netdev); i--;) {
1039 if (indir[i] < rss_i)
1044 fm10k_write_reta(interface, indir);
/* RSS hash key size in bytes (registers * 4 bytes each) */
1049 static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev)
1051 return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG;
/* ethtool .get_rxfh: report Toeplitz hashing, the indirection table,
 * and the cached hash key (little-endian, 4 bytes per register) */
1054 static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
1057 struct fm10k_intfc *interface = netdev_priv(netdev);
1061 *hfunc = ETH_RSS_HASH_TOP;
1063 err = fm10k_get_reta(netdev, indir);
1067 for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4)
1068 *(__le32 *)key = cpu_to_le32(interface->rssrk[i]);
/* ethtool .set_rxfh: only Toeplitz is supported; update the table and
 * write only the key words that actually changed */
1073 static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir,
1074 const u8 *key, const u8 hfunc)
1076 struct fm10k_intfc *interface = netdev_priv(netdev);
1077 struct fm10k_hw *hw = &interface->hw;
1080 /* We do not allow change in unsupported parameters */
1081 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
1084 err = fm10k_set_reta(netdev, indir)
1088 for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) {
1089 u32 rssrk = le32_to_cpu(*(__le32 *)key);
1091 if (interface->rssrk[i] == rssrk)
1094 interface->rssrk[i] = rssrk;
1095 fm10k_write_reg(hw, FM10K_RSSRK(0, i), rssrk);
/* Maximum combined channels: hardware queue count, reduced to a
 * power-of-two per traffic class when QoS (num_tc) is active */
1101 static unsigned int fm10k_max_channels(struct net_device *dev)
1103 struct fm10k_intfc *interface = netdev_priv(dev);
1104 unsigned int max_combined = interface->hw.mac.max_queues;
1105 u8 tcs = netdev_get_num_tc(dev);
1107 /* For QoS report channels per traffic class */
1109 max_combined = BIT((fls(max_combined / tcs) - 1));
1111 return max_combined;
/* ethtool .get_channels: combined queues plus the fixed "other"
 * (non-queue) interrupt vectors */
1114 static void fm10k_get_channels(struct net_device *dev,
1115 struct ethtool_channels *ch)
1117 struct fm10k_intfc *interface = netdev_priv(dev);
1118 struct fm10k_hw *hw = &interface->hw;
1120 /* report maximum channels */
1121 ch->max_combined = fm10k_max_channels(dev);
1123 /* report info for other vector */
1124 ch->max_other = NON_Q_VECTORS(hw);
1125 ch->other_count = ch->max_other;
1127 /* record RSS queues */
1128 ch->combined_count = interface->ring_feature[RING_F_RSS].indices;
/* ethtool .set_channels: only the combined count is adjustable; the
 * queue remap is applied through the setup_tc path */
1131 static int fm10k_set_channels(struct net_device *dev,
1132 struct ethtool_channels *ch)
1134 struct fm10k_intfc *interface = netdev_priv(dev);
1135 unsigned int count = ch->combined_count;
1136 struct fm10k_hw *hw = &interface->hw;
1138 /* verify they are not requesting separate vectors */
1139 if (!count || ch->rx_count || ch->tx_count)
1142 /* verify other_count has not changed */
1143 if (ch->other_count != NON_Q_VECTORS(hw))
1146 /* verify the number of channels does not exceed hardware limits */
1147 if (count > fm10k_max_channels(dev))
1150 interface->ring_feature[RING_F_RSS].limit = count;
1152 /* use setup TC to update any traffic class queue mapping */
1153 return fm10k_setup_tc(dev, netdev_get_num_tc(dev));
/* ethtool operations table wiring the handlers above into the core */
1156 static const struct ethtool_ops fm10k_ethtool_ops = {
1157 .get_strings = fm10k_get_strings,
1158 .get_sset_count = fm10k_get_sset_count,
1159 .get_ethtool_stats = fm10k_get_ethtool_stats,
1160 .get_drvinfo = fm10k_get_drvinfo,
1161 .get_link = ethtool_op_get_link,
1162 .get_pauseparam = fm10k_get_pauseparam,
1163 .set_pauseparam = fm10k_set_pauseparam,
1164 .get_msglevel = fm10k_get_msglevel,
1165 .set_msglevel = fm10k_set_msglevel,
1166 .get_ringparam = fm10k_get_ringparam,
1167 .set_ringparam = fm10k_set_ringparam,
1168 .get_coalesce = fm10k_get_coalesce,
1169 .set_coalesce = fm10k_set_coalesce,
1170 .get_rxnfc = fm10k_get_rxnfc,
1171 .set_rxnfc = fm10k_set_rxnfc,
1172 .get_regs = fm10k_get_regs,
1173 .get_regs_len = fm10k_get_regs_len,
1174 .self_test = fm10k_self_test,
1175 .get_priv_flags = fm10k_get_priv_flags,
1176 .set_priv_flags = fm10k_set_priv_flags,
1177 .get_rxfh_indir_size = fm10k_get_reta_size,
1178 .get_rxfh_key_size = fm10k_get_rssrk_size,
1179 .get_rxfh = fm10k_get_rssh,
1180 .set_rxfh = fm10k_set_rssh,
1181 .get_channels = fm10k_get_channels,
1182 .set_channels = fm10k_set_channels,
1183 .get_ts_info = ethtool_op_get_ts_info,
/* Attach the ops table to a freshly-created netdev */
1186 void fm10k_set_ethtool_ops(struct net_device *dev)
1188 dev->ethtool_ops = &fm10k_ethtool_ops;