// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <linux/pci.h>

#include <net/ipv6.h>

#include "igc.h"
#include "igc_hw.h"
#include "igc_tsn.h"
#include "igc_xdp.h"

#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

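/* Verdicts for XDP programs run on Rx. IGC_XDP_PASS is zero and the rest
 * are distinct bits, so the Rx cleanup path can OR together the verdicts of
 * all frames handled in one NAPI poll and perform the Tx tail bump and
 * xdp_do_flush() once at the end of the poll.
 */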
#define IGC_XDP_PASS		0
#define IGC_XDP_CONSUMED	BIT(0)
#define IGC_XDP_TX		BIT(1)
#define IGC_XDP_REDIRECT	BIT(2)

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

char igc_driver_name[] = "igc";
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
	"Copyright(c) 2018 Intel Corporation.";

static const struct igc_info *igc_info_tbl[] = {
	[board_base] = &igc_base_info,
};

static const struct pci_device_id igc_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

void igc_reset(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	struct igc_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition PBA for greater than 9k MTU if required */
	pba = IGC_PBA_34K;

	/* Flow control settings.
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame. As such we must have enough
	 * space to allow for us to complete our current transmit and then
	 * receive the frame that is in progress from the link partner.
	 * Set it to:
	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
	 */
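	/* Illustrative example (assumed sizes): with a 34 KB packet buffer
	 * (pba << 10 = 34816 bytes), a 1522-byte max frame and a 9728-byte
	 * jumbo frame limit, hwm = 34816 - (1522 + 9728) = 23566, which
	 * rounds down to high_water = 23552 and low_water = 23536.
	 */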
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	hw->mac.ops.reset_hw(hw);

	if (hw->mac.ops.init_hw(hw))
		netdev_err(dev, "Error on hardware initialization\n");

	/* Re-establish EEE setting */
	igc_set_eee_i225(hw, true, true, true);

	if (!netif_running(adapter->netdev))
		igc_power_down_phy_copper_base(&adapter->hw);

	/* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
	wr32(IGC_VET, ETH_P_8021Q);

	/* Re-enable PTP, where applicable. */
	igc_ptp_reset(adapter);

	/* Re-enable TSN offloading, where applicable. */
	igc_tsn_reset(adapter);

	igc_get_phy_info(hw);
}

/**
 * igc_power_up_link - Power up the phy link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
	igc_reset_phy(&adapter->hw);

	igc_power_up_phy_copper(&adapter->hw);

	igc_setup_link(&adapter->hw);
}

/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	if (!pci_device_is_present(adapter->pdev))
		return;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void igc_get_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
			 dma_unmap_len(buf, len), DMA_TO_DEVICE);

	dma_unmap_len_set(buf, len, 0);
}

/**
 * igc_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
	u32 xsk_frames = 0;

	while (i != tx_ring->next_to_use) {
		union igc_adv_tx_desc *eop_desc, *tx_desc;

		switch (tx_buffer->type) {
		case IGC_TX_BUFFER_TYPE_XSK:
			xsk_frames++;
			break;
		case IGC_TX_BUFFER_TYPE_XDP:
			xdp_return_frame(tx_buffer->xdpf);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		case IGC_TX_BUFFER_TYPE_SKB:
			dev_kfree_skb_any(tx_buffer->skb);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		default:
			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
			break;
		}

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGC_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
		}

		tx_buffer->next_to_watch = NULL;

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	if (tx_ring->xsk_pool && xsk_frames)
		xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igc_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources.
 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
	igc_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igc_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources.
 */
static void igc_free_all_tx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igc_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 */
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
	struct net_device *ndev = tx_ring->netdev;
	struct device *dev = tx_ring->dev;
	int size = 0;

	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igc_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Tx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
					      igc_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
				     igc_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(buffer_info->page,
					buffer_info->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}
}

static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
{
	struct igc_rx_buffer *bi;
	u16 i;

	for (i = 0; i < ring->count; i++) {
		bi = &ring->rx_buffer_info[i];
		if (!bi->xdp)
			continue;

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;
	}
}

/**
 * igc_clean_rx_ring - Free Rx Buffers per Queue
 * @ring: ring to free buffers from
 */
static void igc_clean_rx_ring(struct igc_ring *ring)
{
	if (ring->xsk_pool)
		igc_clean_rx_ring_xsk_pool(ring);
	else
		igc_clean_rx_ring_page_shared(ring);

	clear_ring_uses_large_buffer(ring);

	ring->next_to_alloc = 0;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

/**
 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igc_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igc_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources.
 */
void igc_free_rx_resources(struct igc_ring *rx_ring)
{
	igc_clean_rx_ring(rx_ring);

	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igc_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources.
 */
static void igc_free_all_rx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 */
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
	struct net_device *ndev = rx_ring->netdev;
	struct device *dev = rx_ring->dev;
	u8 index = rx_ring->queue_index;
	int size, desc_len, res;

	/* XDP RX-queue info */
	if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
			       rx_ring->q_vector->napi.napi_id);
	if (res < 0) {
		netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
			   index);
		return res;
	}

	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union igc_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
 *                              (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igc_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Rx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
					      struct igc_ring *ring)
{
	if (!igc_xdp_is_enabled(adapter) ||
	    !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
		return NULL;

	return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
}

/**
 * igc_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	union igc_adv_rx_desc *rx_desc;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;
	u64 rdba = ring->dma;
	u32 buf_size;

	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
	if (ring->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_PAGE_SHARED,
						   NULL));
	}

	if (igc_xdp_is_enabled(adapter))
		set_ring_uses_large_buffer(ring);

	/* disable the queue */
	wr32(IGC_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(IGC_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
	wr32(IGC_RDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
	wr32(IGC_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* reset next-to-use/clean to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	if (ring->xsk_pool)
		buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
	else if (ring_uses_large_buffer(ring))
		buf_size = IGC_RXBUFFER_3072;
	else
		buf_size = IGC_RXBUFFER_2048;

	srrctl = rd32(IGC_SRRCTL(reg_idx));
	srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK |
		    IGC_SRRCTL_DESCTYPE_MASK);
	srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN);
	srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size);
	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

	wr32(IGC_SRRCTL(reg_idx), srrctl);

	rxdctl |= IGC_RX_PTHRESH;
	rxdctl |= IGC_RX_HTHRESH << 8;
	rxdctl |= IGC_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igc_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGC_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

	wr32(IGC_RXDCTL(reg_idx), rxdctl);
}

/**
 * igc_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx(struct igc_adapter *adapter)
{
	int i;

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igc_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 */
static void igc_configure_tx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	int reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;

	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);

	/* disable the queue */
	wr32(IGC_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(IGC_TDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_tx_desc));
	wr32(IGC_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(IGC_TDBAH(reg_idx), tdba >> 32);

	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
	wr32(IGC_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGC_TX_PTHRESH;
	txdctl |= IGC_TX_HTHRESH << 8;
	txdctl |= IGC_TX_WTHRESH << 16;

	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
	wr32(IGC_TXDCTL(reg_idx), txdctl);
}

/**
 * igc_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 */
static void igc_configure_tx(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igc_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 */
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 j, num_rx_queues;
	u32 mrqc, rxcsum;
	u32 rss_key[10];

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (j = 0; j < 10; j++)
		wr32(IGC_RSSRK(j), rss_key[j]);

	num_rx_queues = adapter->rss_queues;

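	/* Default RETA: spread the table evenly over the active queues.
	 * For example (illustrative), with rss_queues = 4 and a 128-entry
	 * table, entry j maps to queue (j * 4) / 128, i.e. entries 0-31 go
	 * to queue 0, 32-63 to queue 1, and so on.
	 */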
	if (adapter->rss_indir_tbl_init != num_rx_queues) {
		for (j = 0; j < IGC_RETA_SIZE; j++)
			adapter->rss_indir_tbl[j] =
			(j * num_rx_queues) / IGC_RETA_SIZE;
		adapter->rss_indir_tbl_init = num_rx_queues;
	}
	igc_write_rss_indir_tbl(adapter);

	/* Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(IGC_RXCSUM);
	rxcsum |= IGC_RXCSUM_PCSD;

	/* Enable Receive Checksum Offload for SCTP */
	rxcsum |= IGC_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(IGC_RXCSUM, rxcsum);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6 |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;

	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;

	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;

	wr32(IGC_MRQC, mrqc);
}

/**
 * igc_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 */
static void igc_setup_rctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(IGC_RCTL);

	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);

	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* enable stripping of CRC. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= IGC_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= IGC_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(IGC_RXDCTL(0), 0);

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in set_rx_mode
		 */
		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
			  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
	}

	wr32(IGC_RCTL, rctl);
}

/**
 * igc_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 */
static void igc_setup_tctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which could be enabled by default */
	wr32(IGC_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);

	/* Enable transmits */
	tctl |= IGC_TCTL_EN;

	wr32(IGC_TCTL, tctl);
}

/**
 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be set
 * @index: Filter index
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *	   matching the filter are enqueued onto 'queue'. Otherwise, queue
 *	   assignment is disabled.
 */
static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
				  enum igc_mac_filter_type type,
				  const u8 *addr, int queue)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 ral, rah;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	ral = le32_to_cpup((__le32 *)(addr));
	rah = le16_to_cpup((__le16 *)(addr + 4));

	if (type == IGC_MAC_FILTER_TYPE_SRC) {
		rah &= ~IGC_RAH_ASEL_MASK;
		rah |= IGC_RAH_ASEL_SRC_ADDR;
	}

	if (queue >= 0) {
		rah &= ~IGC_RAH_QSEL_MASK;
		rah |= (queue << IGC_RAH_QSEL_SHIFT);
		rah |= IGC_RAH_QSEL_ENABLE;
	}

	rah |= IGC_RAH_AV;

	wr32(IGC_RAL(index), ral);
	wr32(IGC_RAH(index), rah);

	netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
}

/**
 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be cleared
 * @index: Filter index
 */
static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	wr32(IGC_RAL(index), 0);
	wr32(IGC_RAH(index), 0);

	netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
}

/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	u8 *addr = adapter->hw.mac.addr;

	netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);

	igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}

/**
 * igc_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int igc_set_mac(struct net_device *netdev, void *p)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, addr->sa_data);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igc_set_default_mac_filter(adapter);

	return 0;
}

/**
 * igc_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 */
static int igc_write_mc_addr_list(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igc_update_mc_addr_list(hw, NULL, 0);
		return 0;
	}

	mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igc_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

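/* Compute the launchtime offset, relative to the start of the current
 * Qbv cycle, to program into the advanced Tx context descriptor.
 * Illustrative example (assumed values): with cycle_time = 1 ms and now
 * 10.3 ms past base_time, n = 10, so baset_est = base_time + 10 ms and
 * end_of_cycle = base_time + 11 ms; a txtime of base_time + 10.7 ms then
 * yields a launchtime offset of 0.7 ms into the cycle.
 */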
static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
				bool *first_flag, bool *insert_empty)
{
	struct igc_adapter *adapter = netdev_priv(ring->netdev);
	ktime_t cycle_time = adapter->cycle_time;
	ktime_t base_time = adapter->base_time;
	ktime_t now = ktime_get_clocktai();
	ktime_t baset_est, end_of_cycle;
	s32 launchtime;
	s64 n;

	n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);

	baset_est = ktime_add_ns(base_time, cycle_time * (n));
	end_of_cycle = ktime_add_ns(baset_est, cycle_time);

	if (ktime_compare(txtime, end_of_cycle) >= 0) {
		if (baset_est != ring->last_ff_cycle) {
			*first_flag = true;
			ring->last_ff_cycle = baset_est;

			if (ktime_compare(txtime, ring->last_tx_cycle) > 0)
				*insert_empty = true;
		}
	}

	/* Introduce a window at the end of the cycle in which a packet's
	 * launchtime may not be honored. A 5 usec window is chosen to
	 * account for the software updating the tail pointer and the
	 * packet being DMA'ed into the packet buffer.
	 */
	if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC))
		netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
			    txtime);

	ring->last_tx_cycle = end_of_cycle;

	launchtime = ktime_sub_ns(txtime, baset_est);
	if (launchtime > 0)
		div_s64_rem(launchtime, cycle_time, &launchtime);
	else
		launchtime = 0;

	return cpu_to_le32(launchtime);
}

static int igc_init_empty_frame(struct igc_ring *ring,
				struct igc_tx_buffer *buffer,
				struct sk_buff *skb)
{
	unsigned int size;
	dma_addr_t dma;

	size = skb_headlen(skb);

	dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ring->dev, dma)) {
		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->protocol = 0;
	buffer->bytecount = skb->len;
	buffer->gso_segs = 1;
	buffer->time_stamp = jiffies;
	dma_unmap_len_set(buffer, len, skb->len);
	dma_unmap_addr_set(buffer, dma, dma);

	return 0;
}

static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
					struct sk_buff *skb,
					struct igc_tx_buffer *first)
{
	union igc_adv_tx_desc *desc;
	u32 cmd_type, olinfo_status;
	int err;

	if (!igc_desc_unused(ring))
		return -EBUSY;

	err = igc_init_empty_frame(ring, first, skb);
	if (err)
		return err;

	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
		   first->bytecount;
	olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;

	desc = IGC_TX_DESC(ring, ring->next_to_use);
	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));

	netdev_tx_sent_queue(txring_txq(ring), skb->len);

	first->next_to_watch = desc;

	ring->next_to_use++;
	if (ring->next_to_use == ring->count)
		ring->next_to_use = 0;

	return 0;
}

#define IGC_EMPTY_FRAME_SIZE	60

static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
			    __le32 launch_time, bool first_flag,
			    u32 vlan_macip_lens, u32 type_tucmd,
			    u32 mss_l4len_idx)
{
	struct igc_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGC_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

	/* For i225, context index must be unique per ring. */
	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	if (first_flag)
		mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->launch_time = launch_time;
}

static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
			__le32 launch_time, bool first_flag)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
		    !tx_ring->launchtime_enable)
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (skb_csum_is_sctp(skb)) {
			type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		fallthrough;
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IGC_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
			vlan_macip_lens, type_tucmd, 0);
}

static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Memory barrier: make the queue stop visible before re-checking
	 * for free descriptors below.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (igc_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	if (igc_desc_unused(tx_ring) >= size)
		return 0;

	return __igc_maybe_stop_tx(tx_ring, size);
}

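/* IGC_SET_FLAG translates a bit in _input (selected by _flag) into the
 * corresponding descriptor bit _result without branching: the masked value
 * is scaled by the ratio of the two bits, which is an exact power of two.
 * E.g. IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, IGC_ADVTXD_DCMD_VLE)
 * evaluates to IGC_ADVTXD_DCMD_VLE when the VLAN flag is set and to 0
 * otherwise.
 */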
#define IGC_SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ?				\
	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) :	\
	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))

static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
		       IGC_ADVTXD_DCMD_DEXT |
		       IGC_ADVTXD_DCMD_IFCS;

	/* set HW vlan bit if vlan is present */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
				 IGC_ADVTXD_DCMD_VLE);

	/* set segmentation bits for TSO */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
				 (IGC_ADVTXD_DCMD_TSE));

	/* set timestamp bit if present */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
				 (IGC_ADVTXD_MAC_TSTAMP));

	/* insert frame checksum */
	cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);

	return cmd_type;
}

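/* The olinfo_status assembly below uses the same branch-free trick as
 * IGC_SET_FLAG: (tx_flags & FLAG) is either 0 or FLAG, so multiplying by
 * the precomputed constant (RESULT / FLAG) yields either 0 or RESULT.
 * This requires FLAG and RESULT to be powers of two so the division is
 * exact.
 */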
static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
				 union igc_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;

	/* insert L4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
			  ((IGC_TXD_POPTS_TXSM << 8) /
			  IGC_TX_FLAGS_CSUM);

	/* insert IPv4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
			  (((IGC_TXD_POPTS_IXSM << 8)) /
			  IGC_TX_FLAGS_IPV4);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

static int igc_tx_map(struct igc_ring *tx_ring,
		      struct igc_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;
	u32 tx_flags = first->tx_flags;
	skb_frag_t *frag;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	dma_addr_t dma;
	u32 cmd_type;

	cmd_type = igc_tx_cmd_type(skb, tx_flags);
	tx_desc = IGC_TX_DESC(tx_ring, i);

	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGC_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGC_MAX_DATA_PER_TXD;
			size -= IGC_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGC_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGC_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch. (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);
	}

	return 0;

dma_error:
	netdev_err(tx_ring->netdev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}

static int igc_tso(struct igc_ring *tx_ring,
		   struct igc_tx_buffer *first,
		   __le32 launch_time, bool first_flag,
		   u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM |
				   IGC_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
		/* compute length of segmentation header */
		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
	} else {
		/* compute length of segmentation header */
		*hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
	}

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
			vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
				       struct igc_ring *tx_ring)
{
	struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
	bool first_flag = false, insert_empty = false;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	struct igc_tx_buffer *first;
	__le32 launch_time = 0;
	u32 tx_flags = 0;
	unsigned short f;
	ktime_t txtime;
	u8 hdr_len = 0;
	int tso = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
	 *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
	 *	+ 2 desc gap to keep tail from touching head,
	 *	+ 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(
						&skb_shinfo(skb)->frags[f]));

	if (igc_maybe_stop_tx(tx_ring, count + 5)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (!tx_ring->launchtime_enable)
		goto done;

	txtime = skb->tstamp;
	skb->tstamp = ktime_set(0, 0);
	launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);

	if (insert_empty) {
		struct igc_tx_buffer *empty_info;
		struct sk_buff *empty;
		void *data;

		empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
		empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
		if (!empty)
			goto done;

		data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
		memset(data, 0, IGC_EMPTY_FRAME_SIZE);

		igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);

		if (igc_init_tx_empty_descriptor(tx_ring,
						 empty,
						 empty_info) < 0)
			dev_kfree_skb_any(empty);
	}

done:
	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->type = IGC_TX_BUFFER_TYPE_SKB;
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (tx_ring->max_sdu > 0) {
		u32 max_sdu = 0;

		max_sdu = tx_ring->max_sdu +
			  (skb_vlan_tagged(first->skb) ? VLAN_HLEN : 0);

		if (first->bytecount > max_sdu) {
			adapter->stats.txdrop++;
			goto out_drop;
		}
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		/* FIXME: add support for retrieving timestamps from
		 * the other timer registers before skipping the
		 * timestamping request.
		 */
		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
		    !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
					   &adapter->state)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IGC_TX_FLAGS_TSTAMP;

			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
		} else {
			adapter->tx_hwtstamp_skipped++;
		}
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IGC_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igc_tx_csum(tx_ring, first, launch_time, first_flag);

	igc_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}

static void igc_rx_checksum(struct igc_ring *ring,
			    union igc_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igc_test_staterr(rx_desc,
			     IGC_RXDEXT_STATERR_L4E |
			     IGC_RXDEXT_STATERR_IPE)) {
		/* work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets (aka let the stack check the crc32c)
		 */
		if (!(skb->len == 60 &&
		      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
				      IGC_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
		   le32_to_cpu(rx_desc->wb.upper.status_error));
}

/* Mapping HW RSS Type to enum pkt_hash_types */
static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = {
	[IGC_RSS_TYPE_NO_HASH]		= PKT_HASH_TYPE_L2,
	[IGC_RSS_TYPE_HASH_TCP_IPV4]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_IPV4]	= PKT_HASH_TYPE_L3,
	[IGC_RSS_TYPE_HASH_TCP_IPV6]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_IPV6_EX]	= PKT_HASH_TYPE_L3,
	[IGC_RSS_TYPE_HASH_IPV6]	= PKT_HASH_TYPE_L3,
	[IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_UDP_IPV4]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_UDP_IPV6]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4,
	[10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */
	[11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */
	[12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisions */
	[13] = PKT_HASH_TYPE_NONE,
	[14] = PKT_HASH_TYPE_NONE,
	[15] = PKT_HASH_TYPE_NONE,
};

static inline void igc_rx_hash(struct igc_ring *ring,
			       union igc_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH) {
		u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		u32 rss_type = igc_rss_type(rx_desc);

		skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]);
	}
}

static void igc_rx_vlan(struct igc_ring *rx_ring,
			union igc_adv_rx_desc *rx_desc,
			struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	u16 vid;

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
		if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
		    test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
			vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
		else
			vid = le16_to_cpu(rx_desc->wb.upper.vlan);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
}

/**
 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in order
 * to populate the hash, checksum, VLAN, protocol, and other fields within the
 * skb.
 */
static void igc_process_skb_fields(struct igc_ring *rx_ring,
				   union igc_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	igc_rx_hash(rx_ring, rx_desc, skb);

	igc_rx_checksum(rx_ring, rx_desc, skb);

	igc_rx_vlan(rx_ring, rx_desc, skb);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl;

	ctrl = rd32(IGC_CTRL);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl |= IGC_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~IGC_CTRL_VME;
	}
	wr32(IGC_CTRL, ctrl);
}

static void igc_restore_vlan(struct igc_adapter *adapter)
{
	igc_vlan_mode(adapter->netdev, adapter->netdev->features);
}

static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
					       const unsigned int size,
					       int *rx_buffer_pgcnt)
{
	struct igc_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buffer->page);
#else
		0;
#endif
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
			       unsigned int truesize)
{
#if (PAGE_SIZE < 8192)
	buffer->page_offset ^= truesize;
#else
	buffer->page_offset += truesize;
#endif
}

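/* On systems with 4K pages each Rx buffer claims half a page, and
 * igc_rx_buffer_flip() above XORs page_offset with the truesize so the two
 * halves of the page alternate between hardware and stack ownership. On
 * larger pages the offset simply advances by truesize.
 */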
static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
					      unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = igc_rx_pg_size(ring) / 2;
#else
	truesize = ring_uses_build_skb(ring) ?
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
		   SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 */
static void igc_add_rx_frag(struct igc_ring *rx_ring,
			    struct igc_rx_buffer *rx_buffer,
			    struct sk_buff *skb,
			    unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = igc_rx_pg_size(rx_ring) / 2;
#else
	truesize = ring_uses_build_skb(rx_ring) ?
		   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
		   SKB_DATA_ALIGN(size);
#endif
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);

	igc_rx_buffer_flip(rx_buffer, truesize);
}

static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
				     struct igc_rx_buffer *rx_buffer,
				     struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data_meta);

	/* build an skb around the page buffer */
	skb = napi_build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, size);
	if (metasize)
		skb_metadata_set(skb, metasize);

	igc_rx_buffer_flip(rx_buffer, truesize);
	return skb;
}

static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
					 struct igc_rx_buffer *rx_buffer,
					 struct xdp_buff *xdp,
					 ktime_t timestamp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
	void *va = xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data_meta);

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi,
			     IGC_RX_HDR_LEN + metasize);
	if (unlikely(!skb))
		return NULL;

	if (timestamp)
		skb_hwtstamps(skb)->hwtstamp = timestamp;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGC_RX_HDR_LEN)
		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
	       ALIGN(headlen + metasize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(va + headlen) - page_address(rx_buffer->page),
				size, truesize);
		igc_rx_buffer_flip(rx_buffer, truesize);
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

/**
 * igc_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void igc_reuse_rx_page(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *old_buff)
{
	u16 nta = rx_ring->next_to_alloc;
	struct igc_rx_buffer *new_buff;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
	 */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

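/* The driver takes close to USHRT_MAX references on each page up front and
 * tracks the share it still owns in pagecnt_bias. If page_count() minus the
 * bias is greater than one, the network stack still holds the other half of
 * the page, so it cannot be recycled yet. When the bias drains to one, the
 * reference count is replenished in bulk instead of once per frame.
 */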
static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
				  int rx_buffer_pgcnt)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define IGC_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)

	if (rx_buffer->page_offset > IGC_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * igc_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool igc_is_non_eop(struct igc_ring *rx_ring,
			   union igc_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IGC_RX_DESC(rx_ring, ntc));

	if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
		return false;

	return true;
}

/**
 * igc_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool igc_cleanup_headers(struct igc_ring *rx_ring,
				union igc_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

static void igc_put_rx_buffer(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *rx_buffer,
			      int rx_buffer_pgcnt)
{
	if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
		/* hand second half of page back to the ring */
		igc_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* We are not reusing the buffer so unmap it and free
		 * any references we are holding to it
		 */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
{
	struct igc_adapter *adapter = rx_ring->q_vector->adapter;

	if (ring_uses_build_skb(rx_ring))
		return IGC_SKB_PAD;
	if (igc_xdp_is_enabled(adapter))
		return XDP_PACKET_HEADROOM;

	return 0;
}

static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
				  struct igc_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 igc_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IGC_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = igc_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring
 * @cleaned_count: number of buffers to clean
 */
static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
{
	union igc_adv_rx_desc *rx_desc;
	u16 i = rx_ring->next_to_use;
	struct igc_rx_buffer *bi;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IGC_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;
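	/* i was just biased by -count so that it reaches zero exactly when
	 * the ring wraps; the true ring index is i + count. This turns the
	 * wrap test in the refill loop below into a cheap !i check.
	 */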
	bufsz = igc_rx_bufsz(rx_ring);

	do {
		if (!igc_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGC_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}

static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
{
	union igc_adv_rx_desc *desc;
	u16 i = ring->next_to_use;
	struct igc_rx_buffer *bi;
	dma_addr_t dma;
	bool ok = true;

	if (!count)
		return ok;

	XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff);

	desc = IGC_RX_DESC(ring, i);
	bi = &ring->rx_buffer_info[i];
	i -= ring->count;

	do {
		bi->xdp = xsk_buff_alloc(ring->xsk_pool);
		if (!bi->xdp) {
			ok = false;
			break;
		}

		dma = xsk_buff_xdp_get_dma(bi->xdp);
		desc->read.pkt_addr = cpu_to_le64(dma);

		desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			desc = IGC_RX_DESC(ring, 0);
			bi = ring->rx_buffer_info;
			i -= ring->count;
		}

		/* Clear the length for the next_to_use descriptor. */
		desc->wb.upper.length = 0;

		count--;
	} while (count);

	i += ring->count;

	if (ring->next_to_use != i) {
		ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, ring->tail);
	}

	return ok;
}

/* This function requires __netif_tx_lock is held by the caller. */
static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
				      struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
	u16 count, index = ring->next_to_use;
	struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
	struct igc_tx_buffer *buffer = head;
	union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
	u32 olinfo_status, len = xdpf->len, cmd_type;
	void *data = xdpf->data;
	u16 i;

	count = TXD_USE_COUNT(len);
	for (i = 0; i < nr_frags; i++)
		count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));

	if (igc_maybe_stop_tx(ring, count + 3)) {
		/* this is a hard error */
		return -EBUSY;
	}

	i = 0;
	head->bytecount = xdp_get_frame_len(xdpf);
	head->type = IGC_TX_BUFFER_TYPE_XDP;
	head->gso_segs = 1;
	head->xdpf = xdpf;

	olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
	desc->read.olinfo_status = cpu_to_le32(olinfo_status);

	for (;;) {
		dma_addr_t dma;

		dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(ring->dev, dma)) {
			netdev_err_once(ring->netdev,
					"Failed to map DMA for TX\n");
			goto unmap;
		}

		dma_unmap_len_set(buffer, len, len);
		dma_unmap_addr_set(buffer, dma, dma);

		cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
			   IGC_ADVTXD_DCMD_IFCS | len;

		desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		desc->read.buffer_addr = cpu_to_le64(dma);

		buffer->protocol = 0;

		if (++index == ring->count)
			index = 0;

		if (i == nr_frags)
			break;

		buffer = &ring->tx_buffer_info[index];
		desc = IGC_TX_DESC(ring, index);
		desc->read.olinfo_status = 0;

		data = skb_frag_address(&sinfo->frags[i]);
		len = skb_frag_size(&sinfo->frags[i]);
		i++;
	}

	desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);

	netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
	/* set the timestamp */
	head->time_stamp = jiffies;
	/* set next_to_watch value indicating a packet is present */
	head->next_to_watch = desc;
	ring->next_to_use = index;

	return 0;

unmap:
	for (;;) {
		buffer = &ring->tx_buffer_info[index];
		if (dma_unmap_len(buffer, len))
			dma_unmap_page(ring->dev,
				       dma_unmap_addr(buffer, dma),
				       dma_unmap_len(buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(buffer, len, 0);
		if (buffer == head)
			break;

		if (!index)
			index += ring->count;
		index--;
	}

	return -ENOMEM;
}
static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
					    int cpu)
{
	int index = cpu;

	if (unlikely(index < 0))
		index = 0;

	while (index >= adapter->num_tx_queues)
		index -= adapter->num_tx_queues;

	return adapter->tx_ring[index];
}
static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	struct igc_ring *ring;
	int res;

	if (unlikely(!xdpf))
		return -EFAULT;

	ring = igc_xdp_get_tx_ring(adapter, cpu);
	nq = txring_txq(ring);

	__netif_tx_lock(nq, cpu);
	res = igc_xdp_init_tx_descriptor(ring, xdpf);
	__netif_tx_unlock(nq);
	return res;
}
/* This function assumes rcu_read_lock() is held by the caller. */
static int __igc_xdp_run_prog(struct igc_adapter *adapter,
			      struct bpf_prog *prog,
			      struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
		return IGC_XDP_PASS;
	case XDP_TX:
		if (igc_xdp_xmit_back(adapter, xdp) < 0)
			goto out_failure;
		return IGC_XDP_TX;
	case XDP_REDIRECT:
		if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
			goto out_failure;
		return IGC_XDP_REDIRECT;
	default:
		bpf_warn_invalid_xdp_action(adapter->netdev, prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(adapter->netdev, prog, act);
		fallthrough;
	case XDP_DROP:
		return IGC_XDP_CONSUMED;
	}
}
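/* Summary of the verdict mapping above (descriptive only):
 *	XDP_PASS     -> IGC_XDP_PASS      frame continues to the stack
 *	XDP_TX       -> IGC_XDP_TX        frame echoed out our own Tx ring
 *	XDP_REDIRECT -> IGC_XDP_REDIRECT  frame handed to another dev/map
 *	XDP_DROP, XDP_ABORTED and unknown verdicts all collapse into
 *	IGC_XDP_CONSUMED so the caller recycles the buffer.
 */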
static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
					struct xdp_buff *xdp)
{
	struct bpf_prog *prog;
	int res;

	prog = READ_ONCE(adapter->xdp_prog);
	if (!prog) {
		res = IGC_XDP_PASS;
		goto out;
	}

	res = __igc_xdp_run_prog(adapter, prog, xdp);

out:
	return ERR_PTR(-res);
}
/* This function assumes __netif_tx_lock is held by the caller. */
static void igc_flush_tx_descriptors(struct igc_ring *ring)
{
	/* Once tail pointer is updated, hardware can fetch the descriptors
	 * any time so we issue a write membar here to ensure all memory
	 * writes are complete before the tail pointer is updated.
	 */
	wmb();
	writel(ring->next_to_use, ring->tail);
}
static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
{
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	struct igc_ring *ring;

	if (status & IGC_XDP_TX) {
		ring = igc_xdp_get_tx_ring(adapter, cpu);
		nq = txring_txq(ring);

		__netif_tx_lock(nq, cpu);
		igc_flush_tx_descriptors(ring);
		__netif_tx_unlock(nq);
	}

	if (status & IGC_XDP_REDIRECT)
		xdp_do_flush();
}
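/* xdp_do_flush() drains the per-CPU bulk queues that xdp_do_redirect()
 * filled during this NAPI poll, so redirected frames get one batched
 * kick per poll; the single tail bump for IGC_XDP_TX above applies the
 * same batching idea to frames echoed out of our own ring.
 */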
static void igc_update_rx_stats(struct igc_q_vector *q_vector,
				unsigned int packets, unsigned int bytes)
{
	struct igc_ring *ring = q_vector->rx.ring;

	u64_stats_update_begin(&ring->rx_syncp);
	ring->rx_stats.packets += packets;
	ring->rx_stats.bytes += bytes;
	u64_stats_update_end(&ring->rx_syncp);

	q_vector->rx.total_packets += packets;
	q_vector->rx.total_bytes += bytes;
}
2512 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
2514 unsigned int total_bytes = 0, total_packets = 0;
2515 struct igc_adapter *adapter = q_vector->adapter;
2516 struct igc_ring *rx_ring = q_vector->rx.ring;
2517 struct sk_buff *skb = rx_ring->skb;
2518 u16 cleaned_count = igc_desc_unused(rx_ring);
2519 int xdp_status = 0, rx_buffer_pgcnt;
	while (likely(total_packets < budget)) {
		union igc_adv_rx_desc *rx_desc;
		struct igc_rx_buffer *rx_buffer;
		unsigned int size, truesize;
		struct igc_xdp_buff ctx;
		ktime_t timestamp = 0;
		int pkt_offset = 0;
		void *pktbuf;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
			igc_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
2548 truesize = igc_get_rx_frame_truesize(rx_ring, size);
2550 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
		if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
			timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
							pktbuf);
			ctx.rx_ts = timestamp;
			pkt_offset = IGC_TS_HDR_LEN;
			size -= IGC_TS_HDR_LEN;
		}

		xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq);
		xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring),
				 igc_rx_offset(rx_ring) + pkt_offset,
				 size, true);
		xdp_buff_clear_frags_flag(&ctx.xdp);
		ctx.rx_desc = rx_desc;

		skb = igc_xdp_run_prog(adapter, &ctx.xdp);
		if (IS_ERR(skb)) {
			unsigned int xdp_res = -PTR_ERR(skb);

			switch (xdp_res) {
			case IGC_XDP_CONSUMED:
				rx_buffer->pagecnt_bias++;
				break;
			case IGC_XDP_TX:
			case IGC_XDP_REDIRECT:
				igc_rx_buffer_flip(rx_buffer, truesize);
				xdp_status |= xdp_res;
				break;
			}

			total_packets++;
			total_bytes += size;
		} else if (skb)
			igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else if (ring_uses_build_skb(rx_ring))
			skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp);
		else
			skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp,
						timestamp);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}
		igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (igc_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		igc_process_skb_fields(rx_ring, rx_desc, skb);

		napi_gro_receive(&q_vector->napi, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	if (xdp_status)
		igc_finalize_xdp(adapter, xdp_status);

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	igc_update_rx_stats(q_vector, total_packets, total_bytes);

	if (cleaned_count)
		igc_alloc_rx_buffers(rx_ring, cleaned_count);

	return total_packets;
}
static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
					    struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	net_prefetch(xdp->data_meta);

	skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	return skb;
}
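/* Worked example (assuming a 12-byte metadata prefix): the copy above
 * pulls in data_meta..data_end, then __skb_pull(skb, 12) leaves
 * skb->data on the frame itself while skb_metadata_set() records the
 * 12 bytes sitting in front of it for consumers such as TC programs.
 */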
static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
				union igc_adv_rx_desc *desc,
				struct xdp_buff *xdp,
				ktime_t timestamp)
{
	struct igc_ring *ring = q_vector->rx.ring;
	struct sk_buff *skb;

	skb = igc_construct_skb_zc(ring, xdp);
	if (!skb) {
		ring->rx_stats.alloc_failed++;
		return;
	}

	if (timestamp)
		skb_hwtstamps(skb)->hwtstamp = timestamp;

	if (igc_cleanup_headers(ring, desc, skb))
		return;

	igc_process_skb_fields(ring, desc, skb);
	napi_gro_receive(&q_vector->napi, skb);
}
static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp)
{
	/* The xdp_buff used by the ZC code path is allocated as an
	 * xdp_buff_xsk. igc_xdp_buff shares its layout with xdp_buff_xsk,
	 * and the private igc_xdp_buff fields fall into xdp_buff_xsk->cb.
	 */
	return (struct igc_xdp_buff *)xdp;
}
2702 static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
2704 struct igc_adapter *adapter = q_vector->adapter;
2705 struct igc_ring *ring = q_vector->rx.ring;
2706 u16 cleaned_count = igc_desc_unused(ring);
2707 int total_bytes = 0, total_packets = 0;
2708 u16 ntc = ring->next_to_clean;
	struct bpf_prog *prog;
	bool failure = false;
	int xdp_status = 0;

	rcu_read_lock();

	prog = READ_ONCE(adapter->xdp_prog);
2717 while (likely(total_packets < budget)) {
2718 union igc_adv_rx_desc *desc;
2719 struct igc_rx_buffer *bi;
2720 struct igc_xdp_buff *ctx;
		ktime_t timestamp = 0;
		unsigned int size;
		int res;

		desc = IGC_RX_DESC(ring, ntc);
		size = le16_to_cpu(desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = &ring->rx_buffer_info[ntc];
2738 ctx = xsk_buff_to_igc_ctx(bi->xdp);
2739 ctx->rx_desc = desc;
		if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
			timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
							bi->xdp->data);
			ctx->rx_ts = timestamp;

			bi->xdp->data += IGC_TS_HDR_LEN;

			/* HW timestamp has been copied into local variable.
			 * Metadata length when XDP program is called should
			 * be 0.
			 */
			bi->xdp->data_meta += IGC_TS_HDR_LEN;
			size -= IGC_TS_HDR_LEN;
		}
2755 bi->xdp->data_end = bi->xdp->data + size;
2756 xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
		res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
		switch (res) {
		case IGC_XDP_PASS:
			igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
			fallthrough;
		case IGC_XDP_CONSUMED:
			xsk_buff_free(bi->xdp);
			break;
		case IGC_XDP_TX:
		case IGC_XDP_REDIRECT:
			xdp_status |= res;
			break;
		}

		bi->xdp = NULL;
		total_bytes += size;
		total_packets++;
		cleaned_count++;
		ntc++;
		if (ntc == ring->count)
			ntc = 0;
	}
	ring->next_to_clean = ntc;
	rcu_read_unlock();
2784 if (cleaned_count >= IGC_RX_BUFFER_WRITE)
2785 failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count);
	if (xdp_status)
		igc_finalize_xdp(adapter, xdp_status);

	igc_update_rx_stats(q_vector, total_packets, total_bytes);

	if (xsk_uses_need_wakeup(ring->xsk_pool)) {
		if (failure || ring->next_to_clean == ring->next_to_use)
			xsk_set_rx_need_wakeup(ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(ring->xsk_pool);

		return total_packets;
	}

	return failure ? budget : total_packets;
}
2803 static void igc_update_tx_stats(struct igc_q_vector *q_vector,
2804 unsigned int packets, unsigned int bytes)
2806 struct igc_ring *ring = q_vector->tx.ring;
2808 u64_stats_update_begin(&ring->tx_syncp);
2809 ring->tx_stats.bytes += bytes;
2810 ring->tx_stats.packets += packets;
2811 u64_stats_update_end(&ring->tx_syncp);
2813 q_vector->tx.total_bytes += bytes;
2814 q_vector->tx.total_packets += packets;
static void igc_xdp_xmit_zc(struct igc_ring *ring)
{
	struct xsk_buff_pool *pool = ring->xsk_pool;
	struct netdev_queue *nq = txring_txq(ring);
	union igc_adv_tx_desc *tx_desc = NULL;
	int cpu = smp_processor_id();
	u16 ntu = ring->next_to_use;
	struct xdp_desc xdp_desc;
	u16 budget;

	if (!netif_carrier_ok(ring->netdev))
		return;

	__netif_tx_lock(nq, cpu);

	budget = igc_desc_unused(ring);

	while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
		u32 cmd_type, olinfo_status;
		struct igc_tx_buffer *bi;
		dma_addr_t dma;

		cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
			   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
			   xdp_desc.len;
		olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;

		dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);

		tx_desc = IGC_TX_DESC(ring, ntu);
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		bi = &ring->tx_buffer_info[ntu];
		bi->type = IGC_TX_BUFFER_TYPE_XSK;
		bi->protocol = 0;
		bi->bytecount = xdp_desc.len;
		bi->gso_segs = 1;
		bi->time_stamp = jiffies;
		bi->next_to_watch = tx_desc;

		netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);

		ntu++;
		if (ntu == ring->count)
			ntu = 0;
	}

	ring->next_to_use = ntu;
	if (tx_desc) {
		igc_flush_tx_descriptors(ring);
		xsk_tx_release(pool);
	}

	__netif_tx_unlock(nq);
}
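/* Each zero-copy frame uses one advanced descriptor: cmd_type carries
 * the length plus DEXT/IFCS and (via IGC_TXD_DCMD) what amounts to
 * end-of-packet and report-status, so the hardware signals one
 * completion per frame and igc_clean_tx_irq() can account it as a
 * single xsk frame.
 */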
2877 * igc_clean_tx_irq - Reclaim resources after transmit completes
2878 * @q_vector: pointer to q_vector containing needed info
2879 * @napi_budget: Used to determine if we are in netpoll
2881 * returns true if ring is completely cleaned
2883 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
2885 struct igc_adapter *adapter = q_vector->adapter;
2886 unsigned int total_bytes = 0, total_packets = 0;
2887 unsigned int budget = q_vector->tx.work_limit;
2888 struct igc_ring *tx_ring = q_vector->tx.ring;
2889 unsigned int i = tx_ring->next_to_clean;
2890 struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;
	u32 xsk_frames = 0;
	if (test_bit(__IGC_DOWN, &adapter->state))
		return true;
2897 tx_buffer = &tx_ring->tx_buffer_info[i];
2898 tx_desc = IGC_TX_DESC(tx_ring, i);
2899 i -= tx_ring->count;
	do {
		union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
			break;
2915 /* clear next_to_watch to prevent false hangs */
2916 tx_buffer->next_to_watch = NULL;
2918 /* update the statistics for this packet */
2919 total_bytes += tx_buffer->bytecount;
2920 total_packets += tx_buffer->gso_segs;
		switch (tx_buffer->type) {
		case IGC_TX_BUFFER_TYPE_XSK:
			xsk_frames++;
			break;
		case IGC_TX_BUFFER_TYPE_XDP:
			xdp_return_frame(tx_buffer->xdpf);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		case IGC_TX_BUFFER_TYPE_SKB:
			napi_consume_skb(tx_buffer->skb, napi_budget);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		default:
			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
			break;
		}
		/* clear last DMA location and unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IGC_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));
2972 netdev_tx_completed_queue(txring_txq(tx_ring),
2973 total_packets, total_bytes);
2975 i += tx_ring->count;
2976 tx_ring->next_to_clean = i;
2978 igc_update_tx_stats(q_vector, total_packets, total_bytes);
	if (tx_ring->xsk_pool) {
		if (xsk_frames)
			xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
		if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
			xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
		igc_xdp_xmit_zc(tx_ring);
	}
2988 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
2989 struct igc_hw *hw = &adapter->hw;
2991 /* Detect a transmit hang in hardware, this serializes the
2992 * check with the clearing of time_stamp and movement of i
2994 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
2995 if (tx_buffer->next_to_watch &&
2996 time_after(jiffies, tx_buffer->time_stamp +
2997 (adapter->tx_timeout_factor * HZ)) &&
2998 !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
2999 (rd32(IGC_TDH(tx_ring->reg_idx)) !=
3000 readl(tx_ring->tail))) {
3001 /* detected Tx unit hang */
			netdev_err(tx_ring->netdev,
				   "Detected Tx Unit Hang\n"
				   "  Tx Queue             <%d>\n"
				   "  TDH                  <%x>\n"
				   "  TDT                  <%x>\n"
				   "  next_to_use          <%x>\n"
				   "  next_to_clean        <%x>\n"
				   "buffer_info[next_to_clean]\n"
				   "  time_stamp           <%lx>\n"
				   "  next_to_watch        <%p>\n"
				   "  jiffies              <%lx>\n"
				   "  desc.status          <%x>\n",
				   tx_ring->queue_index,
				   rd32(IGC_TDH(tx_ring->reg_idx)),
				   readl(tx_ring->tail),
				   tx_ring->next_to_use,
				   tx_ring->next_to_clean,
				   tx_buffer->time_stamp,
				   tx_buffer->next_to_watch,
				   jiffies,
				   tx_buffer->next_to_watch->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
3032 if (unlikely(total_packets &&
3033 netif_carrier_ok(tx_ring->netdev) &&
3034 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGC_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}
static int igc_find_mac_filter(struct igc_adapter *adapter,
			       enum igc_mac_filter_type type, const u8 *addr)
{
	struct igc_hw *hw = &adapter->hw;
	int max_entries = hw->mac.rar_entry_count;
	u32 ral, rah;
	int i;

	for (i = 0; i < max_entries; i++) {
		ral = rd32(IGC_RAL(i));
		rah = rd32(IGC_RAH(i));

		if (!(rah & IGC_RAH_AV))
			continue;
		if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
			continue;
		if ((rah & IGC_RAH_RAH_MASK) !=
		    le16_to_cpup((__le16 *)(addr + 4)))
			continue;
		if (ral != le32_to_cpup((__le32 *)(addr)))
			continue;

		return i;
	}

	return -1;
}
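/* RAL/RAH layout example: for 00:11:22:33:44:55, RAL holds the first
 * four bytes (0x33221100 when read as a little-endian u32) and the low
 * 16 bits of RAH hold bytes 4-5 (0x5544), which is why the comparisons
 * above use le32/le16 loads of addr and addr + 4.
 */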
static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	int max_entries = hw->mac.rar_entry_count;
	u32 rah;
	int i;

	for (i = 0; i < max_entries; i++) {
		rah = rd32(IGC_RAH(i));

		if (!(rah & IGC_RAH_AV))
			return i;
	}

	return -1;
}
3100 * igc_add_mac_filter() - Add MAC address filter
3101 * @adapter: Pointer to adapter where the filter should be added
3102 * @type: MAC address filter type (source or destination)
3103 * @addr: MAC address
3104 * @queue: If non-negative, queue assignment feature is enabled and frames
3105 * matching the filter are enqueued onto 'queue'. Otherwise, queue
3106 * assignment is disabled.
3108 * Return: 0 in case of success, negative errno code otherwise.
static int igc_add_mac_filter(struct igc_adapter *adapter,
			      enum igc_mac_filter_type type, const u8 *addr,
			      int queue)
{
	struct net_device *dev = adapter->netdev;
	int index;

	index = igc_find_mac_filter(adapter, type, addr);
	if (index >= 0)
		goto update_filter;

	index = igc_get_avail_mac_filter_slot(adapter);
	if (index < 0)
		return -ENOSPC;

	netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
		   index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
		   addr, queue);

update_filter:
	igc_set_mac_filter_hw(adapter, index, type, addr, queue);
	return 0;
}
3135 * igc_del_mac_filter() - Delete MAC address filter
3136 * @adapter: Pointer to adapter where the filter should be deleted from
3137 * @type: MAC address filter type (source or destination)
3138 * @addr: MAC address
3140 static void igc_del_mac_filter(struct igc_adapter *adapter,
3141 enum igc_mac_filter_type type, const u8 *addr)
	struct net_device *dev = adapter->netdev;
	int index;

	index = igc_find_mac_filter(adapter, type, addr);
	if (index < 0)
		return;

	if (index == 0) {
		/* If this is the default filter, we don't actually delete it.
		 * We just reset to its default value i.e. disable queue
		 * assignment.
		 */
		netdev_dbg(dev, "Disable default MAC filter queue assignment");

		igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
	} else {
		netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
			   index,
			   type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
			   addr);

		igc_clear_mac_filter_hw(adapter, index);
	}
}
3169 * igc_add_vlan_prio_filter() - Add VLAN priority filter
3170 * @adapter: Pointer to adapter where the filter should be added
3171 * @prio: VLAN priority value
3172 * @queue: Queue number which matching frames are assigned to
3174 * Return: 0 in case of success, negative errno code otherwise.
static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
				    int queue)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 vlanpqf;

	vlanpqf = rd32(IGC_VLANPQF);

	if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
		netdev_dbg(dev, "VLAN priority filter already in use\n");
		return -EEXIST;
	}

	vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
	vlanpqf |= IGC_VLANPQF_VALID(prio);

	wr32(IGC_VLANPQF, vlanpqf);

	netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
		   prio, queue);
	return 0;
}
3201 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
3202 * @adapter: Pointer to adapter where the filter should be deleted from
3203 * @prio: VLAN priority value
3205 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
3207 struct igc_hw *hw = &adapter->hw;
3210 vlanpqf = rd32(IGC_VLANPQF);
3212 vlanpqf &= ~IGC_VLANPQF_VALID(prio);
3213 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
3215 wr32(IGC_VLANPQF, vlanpqf);
3217 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < MAX_ETYPE_FILTER; i++) {
		u32 etqf = rd32(IGC_ETQF(i));

		if (!(etqf & IGC_ETQF_FILTER_ENABLE))
			return i;
	}

	return -1;
}
3237 * igc_add_etype_filter() - Add ethertype filter
3238 * @adapter: Pointer to adapter where the filter should be added
3239 * @etype: Ethertype value
3240 * @queue: If non-negative, queue assignment feature is enabled and frames
3241 * matching the filter are enqueued onto 'queue'. Otherwise, queue
3242 * assignment is disabled.
3244 * Return: 0 in case of success, negative errno code otherwise.
static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
				int queue)
{
	struct igc_hw *hw = &adapter->hw;
	int index;
	u32 etqf;

	index = igc_get_avail_etype_filter_slot(adapter);
	if (index < 0)
		return -ENOSPC;

	etqf = rd32(IGC_ETQF(index));

	etqf &= ~IGC_ETQF_ETYPE_MASK;
	etqf |= etype;

	if (queue >= 0) {
		etqf &= ~IGC_ETQF_QUEUE_MASK;
		etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
		etqf |= IGC_ETQF_QUEUE_ENABLE;
	}

	etqf |= IGC_ETQF_FILTER_ENABLE;

	wr32(IGC_ETQF(index), etqf);

	netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
		   etype, queue);
	return 0;
}
static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
{
	struct igc_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < MAX_ETYPE_FILTER; i++) {
		u32 etqf = rd32(IGC_ETQF(i));

		if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
			return i;
	}

	return -1;
}
3293 * igc_del_etype_filter() - Delete ethertype filter
3294 * @adapter: Pointer to adapter where the filter should be deleted from
3295 * @etype: Ethertype value
static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
{
	struct igc_hw *hw = &adapter->hw;
	int index;

	index = igc_find_etype_filter(adapter, etype);
	if (index < 0)
		return;

	wr32(IGC_ETQF(index), 0);

	netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
		   etype);
}
static int igc_flex_filter_select(struct igc_adapter *adapter,
				  struct igc_flex_filter *input,
				  u32 *fhft)
{
	struct igc_hw *hw = &adapter->hw;
	u8 fhft_index;
	u32 fhftsl;

	if (input->index >= MAX_FLEX_FILTER) {
		dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n");
		return -EINVAL;
	}

	/* Indirect table select register */
	fhftsl = rd32(IGC_FHFTSL);
	fhftsl &= ~IGC_FHFTSL_FTSL_MASK;
	switch (input->index) {
	case 0 ... 7:
		fhftsl |= 0x00;
		break;
	case 8 ... 15:
		fhftsl |= 0x01;
		break;
	case 16 ... 23:
		fhftsl |= 0x02;
		break;
	case 24 ... 31:
		fhftsl |= 0x03;
		break;
	}
	wr32(IGC_FHFTSL, fhftsl);

	/* Normalize index down to host table register */
	fhft_index = input->index % 8;

	*fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) :
		IGC_FHFT_EXT(fhft_index - 4);

	return 0;
}
3353 static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
3354 struct igc_flex_filter *input)
3356 struct device *dev = &adapter->pdev->dev;
3357 struct igc_hw *hw = &adapter->hw;
3358 u8 *data = input->data;
	u8 *mask = input->mask;
	u32 queuing;
	u32 fhft;
	u32 wufc;
	int ret;
	int i;
3366 /* Length has to be aligned to 8. Otherwise the filter will fail. Bail
3367 * out early to avoid surprises later.
3369 if (input->length % 8 != 0) {
3370 dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n");
3374 /* Select corresponding flex filter register and get base for host table. */
	ret = igc_flex_filter_select(adapter, input, &fhft);
	if (ret)
		return ret;
3379 /* When adding a filter globally disable flex filter feature. That is
3380 * recommended within the datasheet.
3382 wufc = rd32(IGC_WUFC);
3383 wufc &= ~IGC_WUFC_FLEX_HQ;
3384 wr32(IGC_WUFC, wufc);
3386 /* Configure filter */
3387 queuing = input->length & IGC_FHFT_LENGTH_MASK;
3388 queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK;
3389 queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK;
3391 if (input->immediate_irq)
3392 queuing |= IGC_FHFT_IMM_INT;
	if (input->drop)
		queuing |= IGC_FHFT_DROP;
3397 wr32(fhft + 0xFC, queuing);
3399 /* Write data (128 byte) and mask (128 bit) */
3400 for (i = 0; i < 16; ++i) {
		const size_t data_idx = i * 8;
		const size_t row_idx = i * 16;
		u32 dw0 =
			(data[data_idx + 0] << 0) |
			(data[data_idx + 1] << 8) |
			(data[data_idx + 2] << 16) |
			(data[data_idx + 3] << 24);
		u32 dw1 =
			(data[data_idx + 4] << 0) |
			(data[data_idx + 5] << 8) |
			(data[data_idx + 6] << 16) |
			(data[data_idx + 7] << 24);
		u32 tmp;

		/* Write row: dw0, dw1 and mask */
		wr32(fhft + row_idx, dw0);
		wr32(fhft + row_idx + 4, dw1);

		/* mask is only valid for MASK(7, 0) */
		tmp = rd32(fhft + row_idx + 8);
		tmp &= ~GENMASK(7, 0);
		tmp |= mask[i];
		wr32(fhft + row_idx + 8, tmp);
	}
	/* Enable filter. */
	wufc |= IGC_WUFC_FLEX_HQ;
	if (input->index >= 8) {
		/* Filters 0-7 are enabled via WUFC, filters 8-31 via WUFC_EXT
		 * (keeping this consistent with the slot layout used by
		 * igc_find_avail_flex_filter_slot()).
		 */
		u32 wufc_ext = rd32(IGC_WUFC_EXT);

		wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8));

		wr32(IGC_WUFC_EXT, wufc_ext);
	} else {
		wufc |= (IGC_WUFC_FLX0 << input->index);
	}
	wr32(IGC_WUFC, wufc);
3440 dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n",
static void igc_flex_filter_add_field(struct igc_flex_filter *flex,
				      const void *src, unsigned int offset,
				      size_t len, const void *mask)
{
	int i;

	/* data */
	memcpy(&flex->data[offset], src, len);

	/* mask */
	for (i = 0; i < len; ++i) {
		const unsigned int idx = i + offset;
		const u8 *ptr = mask;

		if (mask) {
			if (ptr[i] & 0xff)
				flex->mask[idx / 8] |= BIT(idx % 8);

			continue;
		}

		flex->mask[idx / 8] |= BIT(idx % 8);
	}
}
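/* Mask arithmetic example: the 128-byte pattern is covered by a
 * 128-bit mask, one bit per payload byte. Matching byte 18 (a VLAN TCI
 * byte) sets BIT(18 % 8) = BIT(2) in mask[18 / 8] = mask[2], which is
 * exactly the idx / 8 and BIT(idx % 8) computation above.
 */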
3471 static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter)
3473 struct igc_hw *hw = &adapter->hw;
3477 wufc = rd32(IGC_WUFC);
3478 wufc_ext = rd32(IGC_WUFC_EXT);
	for (i = 0; i < MAX_FLEX_FILTER; i++) {
		if (i < 8) {
			if (!(wufc & (IGC_WUFC_FLX0 << i)))
				return i;
		} else {
			if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8))))
				return i;
		}
	}

	return -ENOSPC;
}
3493 static bool igc_flex_filter_in_use(struct igc_adapter *adapter)
3495 struct igc_hw *hw = &adapter->hw;
3498 wufc = rd32(IGC_WUFC);
3499 wufc_ext = rd32(IGC_WUFC_EXT);
	if (wufc & IGC_WUFC_FILTER_MASK)
		return true;

	if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK)
		return true;

	return false;
}
3510 static int igc_add_flex_filter(struct igc_adapter *adapter,
3511 struct igc_nfc_rule *rule)
3513 struct igc_flex_filter flex = { };
3514 struct igc_nfc_filter *filter = &rule->filter;
	unsigned int eth_offset, user_offset;
	int ret, index;
	bool vlan;

	index = igc_find_avail_flex_filter_slot(adapter);
	if (index < 0)
		return -ENOSPC;

	/* Construct the flex filter:
	 *  -> dest_mac [6]
	 *  -> src_mac [6]
	 *  -> tpid [2]
	 *  -> vlan tci [2]
	 *  -> ether type [2]
	 *  -> user data [8]
	 *  -> = 26 bytes => 32 length
	 */
	flex.index = index;
	flex.length = 32;
	flex.rx_queue = rule->action;

	vlan = rule->filter.vlan_tci || rule->filter.vlan_etype;
	eth_offset = vlan ? 16 : 12;
	user_offset = vlan ? 18 : 14;
3540 /* Add destination MAC */
3541 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
		igc_flex_filter_add_field(&flex, &filter->dst_addr, 0,
					  ETH_ALEN, NULL);
3545 /* Add source MAC */
3546 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
		igc_flex_filter_add_field(&flex, &filter->src_addr, 6,
					  ETH_ALEN, NULL);
3550 /* Add VLAN etype */
3551 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE)
3552 igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12,
					  sizeof(filter->vlan_etype),
					  NULL);
3557 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
3558 igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14,
3559 sizeof(filter->vlan_tci), NULL);
3561 /* Add Ether type */
3562 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
3563 __be16 etype = cpu_to_be16(filter->etype);
3565 igc_flex_filter_add_field(&flex, &etype, eth_offset,
3566 sizeof(etype), NULL);
3570 if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA)
3571 igc_flex_filter_add_field(&flex, &filter->user_data,
					  user_offset,
					  sizeof(filter->user_data),
					  rule->filter.user_mask);
3576 /* Add it down to the hardware and enable it. */
	ret = igc_write_flex_filter_ll(adapter, &flex);
	if (ret)
		return ret;

	filter->flex_index = index;

	return 0;
}
static void igc_del_flex_filter(struct igc_adapter *adapter,
				u16 reg_index)
{
	struct igc_hw *hw = &adapter->hw;
	u32 wufc;
3592 /* Just disable the filter. The filter table itself is kept
3593 * intact. Another flex_filter_add() should override the "old" data
	if (reg_index >= 8) {
		/* Filters 8-31 are tracked in WUFC_EXT, 0-7 in WUFC (keeping
		 * this consistent with igc_find_avail_flex_filter_slot()).
		 */
		u32 wufc_ext = rd32(IGC_WUFC_EXT);

		wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8));
		wr32(IGC_WUFC_EXT, wufc_ext);
	} else {
		wufc = rd32(IGC_WUFC);

		wufc &= ~(IGC_WUFC_FLX0 << reg_index);
		wr32(IGC_WUFC, wufc);
	}
	if (igc_flex_filter_in_use(adapter))
		return;
3611 /* No filters are in use, we may disable flex filters */
3612 wufc = rd32(IGC_WUFC);
3613 wufc &= ~IGC_WUFC_FLEX_HQ;
	wr32(IGC_WUFC, wufc);
}
3617 static int igc_enable_nfc_rule(struct igc_adapter *adapter,
3618 struct igc_nfc_rule *rule)
{
	int err;

	if (rule->flex)
		return igc_add_flex_filter(adapter, rule);
3626 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
		err = igc_add_etype_filter(adapter, rule->filter.etype,
					   rule->action);
		if (err)
			return err;
	}
3633 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
3634 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
					 rule->filter.src_addr, rule->action);
		if (err)
			return err;
	}
3640 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
3641 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
					 rule->filter.dst_addr, rule->action);
		if (err)
			return err;
	}
3647 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
		int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
			   VLAN_PRIO_SHIFT;

		err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
		if (err)
			return err;
	}

	return 0;
}
3659 static void igc_disable_nfc_rule(struct igc_adapter *adapter,
3660 const struct igc_nfc_rule *rule)
	if (rule->flex) {
		igc_del_flex_filter(adapter, rule->filter.flex_index);
		return;
	}
3667 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
3668 igc_del_etype_filter(adapter, rule->filter.etype);
3670 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
		int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
			   VLAN_PRIO_SHIFT;

		igc_del_vlan_prio_filter(adapter, prio);
	}
3677 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
3678 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
3679 rule->filter.src_addr);
3681 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
3682 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
				   rule->filter.dst_addr);
}
3687 * igc_get_nfc_rule() - Get NFC rule
3688 * @adapter: Pointer to adapter
3689 * @location: Rule location
3691 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3693 * Return: Pointer to NFC rule at @location. If not found, NULL.
3695 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
3698 struct igc_nfc_rule *rule;
	list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
		if (rule->location == location)
			return rule;
		if (rule->location > location)
			break;
	}

	return NULL;
}
3711 * igc_del_nfc_rule() - Delete NFC rule
3712 * @adapter: Pointer to adapter
3713 * @rule: Pointer to rule to be deleted
3715 * Disable NFC rule in hardware and delete it from adapter.
3717 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3719 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
3721 igc_disable_nfc_rule(adapter, rule);
	list_del(&rule->list);
	adapter->nfc_rule_count--;
	kfree(rule);
}
3729 static void igc_flush_nfc_rules(struct igc_adapter *adapter)
3731 struct igc_nfc_rule *rule, *tmp;
3733 mutex_lock(&adapter->nfc_rule_lock);
3735 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
3736 igc_del_nfc_rule(adapter, rule);
3738 mutex_unlock(&adapter->nfc_rule_lock);
3742 * igc_add_nfc_rule() - Add NFC rule
3743 * @adapter: Pointer to adapter
3744 * @rule: Pointer to rule to be added
3746 * Enable NFC rule in hardware and add it to adapter.
3748 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3750 * Return: 0 on success, negative errno on failure.
3752 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
3754 struct igc_nfc_rule *pred, *cur;
	err = igc_enable_nfc_rule(adapter, rule);
	if (err)
		return err;

	pred = NULL;
	list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
		if (cur->location >= rule->location)
			break;
		pred = cur;
	}

	list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
	adapter->nfc_rule_count++;
	return 0;
}
3773 static void igc_restore_nfc_rules(struct igc_adapter *adapter)
3775 struct igc_nfc_rule *rule;
3777 mutex_lock(&adapter->nfc_rule_lock);
3779 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
3780 igc_enable_nfc_rule(adapter, rule);
3782 mutex_unlock(&adapter->nfc_rule_lock);
3785 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
3787 struct igc_adapter *adapter = netdev_priv(netdev);
3789 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
3792 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
3794 struct igc_adapter *adapter = netdev_priv(netdev);
	igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);

	return 0;
}
3801 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3802 * @netdev: network interface device structure
3804 * The set_rx_mode entry point is called whenever the unicast or multicast
3805 * address lists or the network interface flags are updated. This routine is
3806 * responsible for configuring the hardware for proper unicast, multicast,
3807 * promiscuous mode, and all-multi behavior.
3809 static void igc_set_rx_mode(struct net_device *netdev)
3811 struct igc_adapter *adapter = netdev_priv(netdev);
3812 struct igc_hw *hw = &adapter->hw;
	u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
	int count;
3816 /* Check for Promiscuous and All Multicast modes */
3817 if (netdev->flags & IFF_PROMISC) {
3818 rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= IGC_RCTL_MPE;
		} else {
			/* Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igc_write_mc_addr_list(netdev);
			if (count < 0)
				rctl |= IGC_RCTL_MPE;
		}
	}
3833 /* Write addresses to available RAR registers, if there is not
3834 * sufficient space to store all the addresses then enable
3835 * unicast promiscuous mode
3837 if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
3838 rctl |= IGC_RCTL_UPE;
3840 /* update state of unicast and multicast */
3841 rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
3842 wr32(IGC_RCTL, rctl);
#if (PAGE_SIZE < 8192)
	if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
		rlpml = IGC_MAX_FRAME_BUILD_SKB;
#endif
	wr32(IGC_RLPML, rlpml);
}
3852 * igc_configure - configure the hardware for RX and TX
3853 * @adapter: private board structure
3855 static void igc_configure(struct igc_adapter *adapter)
	struct net_device *netdev = adapter->netdev;
	int i = 0;
3860 igc_get_hw_control(adapter);
3861 igc_set_rx_mode(netdev);
3863 igc_restore_vlan(adapter);
3865 igc_setup_tctl(adapter);
3866 igc_setup_mrqc(adapter);
3867 igc_setup_rctl(adapter);
3869 igc_set_default_mac_filter(adapter);
3870 igc_restore_nfc_rules(adapter);
3872 igc_configure_tx(adapter);
3873 igc_configure_rx(adapter);
3875 igc_rx_fifo_flush_base(&adapter->hw);
3877 /* call igc_desc_unused which always leaves
3878 * at least 1 descriptor unused to make sure
3879 * next_to_use != next_to_clean
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igc_ring *ring = adapter->rx_ring[i];

		if (ring->xsk_pool)
			igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
		else
			igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
	}
}
3892 * igc_write_ivar - configure ivar for given MSI-X vector
3893 * @hw: pointer to the HW structure
3894 * @msix_vector: vector number we are allocating to a given ring
3895 * @index: row index of IVAR register to write within IVAR table
3896 * @offset: column offset of in IVAR, should be multiple of 8
3898 * The IVAR table consists of 2 columns,
3899 * each containing an cause allocation for an Rx and Tx ring, and a
3900 * variable number of rows depending on the number of queues supported.
3902 static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
3903 int index, int offset)
3905 u32 ivar = array_rd32(IGC_IVAR0, index);
3907 /* clear any bits that are currently set */
3908 ivar &= ~((u32)0xFF << offset);
3910 /* write vector and valid bit */
3911 ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
	array_wr32(IGC_IVAR0, index, ivar);
}
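/* IVAR example: queue 3's Rx cause lives in IVAR table row 3 >> 1 = 1
 * at column offset (3 & 0x1) << 4 = 16; its Tx cause sits 8 bits
 * higher. Writing (msix_vector | IGC_IVAR_VALID) into that byte routes
 * the queue's interrupts to the chosen MSI-X vector (see
 * igc_assign_vector() below).
 */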
3916 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
3918 struct igc_adapter *adapter = q_vector->adapter;
3919 struct igc_hw *hw = &adapter->hw;
3920 int rx_queue = IGC_N0_QUEUE;
3921 int tx_queue = IGC_N0_QUEUE;
3923 if (q_vector->rx.ring)
3924 rx_queue = q_vector->rx.ring->reg_idx;
3925 if (q_vector->tx.ring)
3926 tx_queue = q_vector->tx.ring->reg_idx;
	switch (hw->mac.type) {
	case igc_i225:
		if (rx_queue > IGC_N0_QUEUE)
			igc_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGC_N0_QUEUE)
			igc_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	default:
		WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
		break;
	}
3945 /* add q_vector eims value to global eims_enable_mask */
3946 adapter->eims_enable_mask |= q_vector->eims_value;
3948 /* configure q_vector to set itr on first interrupt */
3949 q_vector->set_itr = 1;
3953 * igc_configure_msix - Configure MSI-X hardware
3954 * @adapter: Pointer to adapter structure
3956 * igc_configure_msix sets up the hardware to properly
3957 * generate MSI-X interrupts.
static void igc_configure_msix(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	int i, vector = 0;
	u32 tmp;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case igc_i225:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick. And it will take days to debug.
		 */
		wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
		     IGC_GPIE_PBA | IGC_GPIE_EIAME |
		     IGC_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = BIT(vector);
		tmp = (vector++ | IGC_IVAR_VALID) << 8;

		wr32(IGC_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igc_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}
3997 * igc_irq_enable - Enable default interrupt generation settings
3998 * @adapter: board private structure
4000 static void igc_irq_enable(struct igc_adapter *adapter)
4002 struct igc_hw *hw = &adapter->hw;
4004 if (adapter->msix_entries) {
4005 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
4006 u32 regval = rd32(IGC_EIAC);
4008 wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
4009 regval = rd32(IGC_EIAM);
4010 wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
		wr32(IGC_EIMS, adapter->eims_enable_mask);
		wr32(IGC_IMS, ims);
	} else {
		wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
		wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
	}
}
4020 * igc_irq_disable - Mask off interrupt generation on the NIC
4021 * @adapter: board private structure
4023 static void igc_irq_disable(struct igc_adapter *adapter)
4025 struct igc_hw *hw = &adapter->hw;
4027 if (adapter->msix_entries) {
4028 u32 regval = rd32(IGC_EIAM);
4030 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
4031 wr32(IGC_EIMC, adapter->eims_enable_mask);
4032 regval = rd32(IGC_EIAC);
		wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(IGC_IAM, 0);
	wr32(IGC_IMC, ~0);
	wrfl();

	if (adapter->msix_entries) {
		int vector = 0, i;

		synchronize_irq(adapter->msix_entries[vector++].vector);

		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[vector++].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
4052 void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
4053 const u32 max_rss_queues)
4055 /* Determine if we need to pair queues. */
4056 /* If rss_queues > half of max_rss_queues, pair the queues in
4057 * order to conserve interrupts due to limited supply.
	if (adapter->rss_queues > (max_rss_queues / 2))
		adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
	else
		adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
}
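/* Example: with a 4-queue limit and rss_queues = 3, 3 > 4 / 2 so the
 * Tx and Rx rings of a queue share one q_vector (and MSI-X vector);
 * with rss_queues = 2 each ring can have a vector of its own.
 */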
unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
{
	return IGC_MAX_RX_QUEUES;
}
static void igc_init_queue_configuration(struct igc_adapter *adapter)
{
	u32 max_rss_queues;

	max_rss_queues = igc_get_max_rss_queues(adapter);
	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());

	igc_set_flag_queue_pairs(adapter, max_rss_queues);
}
4081 * igc_reset_q_vector - Reset config for interrupt vector
4082 * @adapter: board private structure to initialize
4083 * @v_idx: Index of vector to be reset
4085 * If NAPI is enabled it will delete any references to the
4086 * NAPI struct. This is preparation for igc_free_q_vector.
4088 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
4090 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
	/* if we're coming from igc_set_interrupt_capability, the vectors are
	 * not yet allocated
	 */
	if (!q_vector)
		return;
4098 if (q_vector->tx.ring)
4099 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
4101 if (q_vector->rx.ring)
4102 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
	netif_napi_del(&q_vector->napi);
}
4108 * igc_free_q_vector - Free memory allocated for specific interrupt vector
4109 * @adapter: board private structure to initialize
4110 * @v_idx: Index of vector to be freed
4112 * This function frees the memory allocated to the q_vector.
4114 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
4116 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
4118 adapter->q_vector[v_idx] = NULL;
4120 /* igc_get_stats64() might access the rings on this vector,
4121 * we must wait a grace period before freeing it.
	if (q_vector)
		kfree_rcu(q_vector, rcu);
}
4128 * igc_free_q_vectors - Free memory allocated for interrupt vectors
4129 * @adapter: board private structure to initialize
4131 * This function frees the memory allocated to the q_vectors. In addition if
4132 * NAPI is enabled it will delete any references to the NAPI struct prior
4133 * to freeing the q_vector.
4135 static void igc_free_q_vectors(struct igc_adapter *adapter)
4137 int v_idx = adapter->num_q_vectors;
4139 adapter->num_tx_queues = 0;
4140 adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--) {
		igc_reset_q_vector(adapter, v_idx);
		igc_free_q_vector(adapter, v_idx);
	}
}
4150 * igc_update_itr - update the dynamic ITR value based on statistics
4151 * @q_vector: pointer to q_vector
4152 * @ring_container: ring info to update the itr for
4154 * Stores a new ITR value based on packets and byte
4155 * counts during the last interrupt. The advantage of per interrupt
4156 * computation is faster updates and more accurate ITR for the current
4157 * traffic pattern. Constants in this function were computed
4158 * based on theoretical maximum wire speed and thresholds were set based
4159 * on testing data as well as attempting to minimize response time
4160 * while increasing bulk throughput.
4161 * NOTE: These calculations are only valid when operating in a single-
4162 * queue environment.
4164 static void igc_update_itr(struct igc_q_vector *q_vector,
4165 struct igc_ring_container *ring_container)
4167 unsigned int packets = ring_container->total_packets;
4168 unsigned int bytes = ring_container->total_bytes;
4169 u8 itrval = ring_container->itr;
	/* no packets, exit with status unchanged */
	if (packets == 0)
		return;

	switch (itrval) {
	case lowest_latency:
4177 /* handle TSO and jumbo frames */
4178 if (bytes / packets > 8000)
4179 itrval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			itrval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
4184 if (bytes > 10000) {
4185 /* this if handles the TSO accounting */
4186 if (bytes / packets > 8000)
4187 itrval = bulk_latency;
4188 else if ((packets < 10) || ((bytes / packets) > 1200))
4189 itrval = bulk_latency;
4190 else if ((packets > 35))
4191 itrval = lowest_latency;
4192 } else if (bytes / packets > 2000) {
4193 itrval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			itrval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				itrval = low_latency;
		} else if (bytes < 1500) {
			itrval = low_latency;
		}
		break;
	}
4208 /* clear work counters since we have the values we need */
4209 ring_container->total_bytes = 0;
4210 ring_container->total_packets = 0;
4212 /* write updated itr to ring container */
4213 ring_container->itr = itrval;
4216 static void igc_set_itr(struct igc_q_vector *q_vector)
4218 struct igc_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;
	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	switch (adapter->link_speed) {
	case SPEED_10:
	case SPEED_100:
		current_itr = 0;
		new_itr = IGC_4K_ITR;
		goto set_itr_now;
	default:
		break;
	}
4233 igc_update_itr(q_vector, &q_vector->tx);
4234 igc_update_itr(q_vector, &q_vector->rx);
4236 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
4238 /* conservative mode (itr 3) eliminates the lowest_latency setting */
4239 if (current_itr == lowest_latency &&
4240 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4241 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4242 current_itr = low_latency;
4244 switch (current_itr) {
4245 /* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGC_4K_ITR;  /* 4,000 ints/sec */
		break;
	default:
		break;
	}
set_itr_now:
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			      (new_itr + (q_vector->itr_val >> 2)),
			      new_itr) : new_itr;
4269 /* Don't write the value here; it resets the adapter's
4270 * internal timer, and causes us to delay far longer than
4271 * we should between interrupts. Instead, we write the ITR
4272 * value at the beginning of the next interrupt so the timing
4273 * ends up being correct.
4275 q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}
4280 static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
4282 int v_idx = adapter->num_q_vectors;
4284 if (adapter->msix_entries) {
4285 pci_disable_msix(adapter->pdev);
4286 kfree(adapter->msix_entries);
4287 adapter->msix_entries = NULL;
4288 } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
4289 pci_disable_msi(adapter->pdev);
	while (v_idx--)
		igc_reset_q_vector(adapter, v_idx);
}
4297 * igc_set_interrupt_capability - set MSI or MSI-X if supported
4298 * @adapter: Pointer to adapter structure
4299 * @msix: boolean value for MSI-X capability
4301 * Attempt to configure interrupts using the best available
4302 * capabilities of the hardware and kernel.
static void igc_set_interrupt_capability(struct igc_adapter *adapter,
					 bool msix)
{
	int numvecs, i;
	int err;

	if (!msix)
		goto msi_only;
	adapter->flags |= IGC_FLAG_HAS_MSIX;
4314 /* Number of supported queues. */
4315 adapter->num_rx_queues = adapter->rss_queues;
4317 adapter->num_tx_queues = adapter->rss_queues;
4319 /* start with one vector for every Rx queue */
4320 numvecs = adapter->num_rx_queues;
4322 /* if Tx handler is separate add 1 for every Tx queue */
4323 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
4324 numvecs += adapter->num_tx_queues;
4326 /* store the number of vectors reserved for queues */
4327 adapter->num_q_vectors = numvecs;
	/* add 1 vector for link status interrupts */
	numvecs++;
4332 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
4335 if (!adapter->msix_entries)
4338 /* populate entry values */
4339 for (i = 0; i < numvecs; i++)
4340 adapter->msix_entries[i].entry = i;
	err = pci_enable_msix_range(adapter->pdev,
				    adapter->msix_entries,
				    numvecs,
				    numvecs);
	if (err > 0)
		return;

	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;

	igc_reset_interrupt_capability(adapter);
msi_only:
	adapter->flags &= ~IGC_FLAG_HAS_MSIX;
4357 adapter->rss_queues = 1;
4358 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
4359 adapter->num_rx_queues = 1;
4360 adapter->num_tx_queues = 1;
4361 adapter->num_q_vectors = 1;
4362 if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGC_FLAG_HAS_MSI;
}
4367 * igc_update_ring_itr - update the dynamic ITR value based on packet size
4368 * @q_vector: pointer to q_vector
4370 * Stores a new ITR value based on strictly on packet size. This
4371 * algorithm is less sophisticated than that used in igc_update_itr,
4372 * due to the difficulty of synchronizing statistics across multiple
4373 * receive rings. The divisors and thresholds used by this function
4374 * were determined based on theoretical maximum wire speed and testing
4375 * data, in order to minimize response time while increasing bulk
4377 * NOTE: This function is called only when operating in a multiqueue
4378 * receive environment.
4380 static void igc_update_ring_itr(struct igc_q_vector *q_vector)
4382 struct igc_adapter *adapter = q_vector->adapter;
4383 int new_val = q_vector->itr_val;
4384 int avg_wire_size = 0;
4385 unsigned int packets;
4387 /* For non-gigabit speeds, just fix the interrupt rate at 4000
4388 * ints/sec - ITR timer value of 120 ticks.
	switch (adapter->link_speed) {
	case SPEED_10:
	case SPEED_100:
		new_val = IGC_4K_ITR;
		goto set_itr_val;
	default:
		break;
	}
	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;

	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);
	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;
4412 /* Add 24 bytes to size to account for CRC, preamble, and gap */
4413 avg_wire_size += 24;
4415 /* Don't starve jumbo frames */
4416 avg_wire_size = min(avg_wire_size, 3000);
4418 /* Give a little boost to mid-size frames */
4419 if (avg_wire_size > 300 && avg_wire_size < 1200)
4420 new_val = avg_wire_size / 3;
4422 new_val = avg_wire_size / 2;
4424 /* conservative mode (itr 3) eliminates the lowest_latency setting */
4425 if (new_val < IGC_20K_ITR &&
4426 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4427 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		new_val = IGC_20K_ITR;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}

clear_counts:
	q_vector->rx.total_bytes = 0;
4437 q_vector->rx.total_packets = 0;
4438 q_vector->tx.total_bytes = 0;
	q_vector->tx.total_packets = 0;
}
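/* Worked example: 64-byte frames average 64 + 24 = 88 bytes on the
 * wire, giving new_val = 88 / 2 = 44 (a short EITR interval, i.e. a
 * high interrupt rate); 1000-byte frames land in the 300..1200 boost
 * band and get new_val = 1024 / 3 = 341.
 */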
4442 static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
4444 struct igc_adapter *adapter = q_vector->adapter;
4445 struct igc_hw *hw = &adapter->hw;
4447 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
4448 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
4449 if (adapter->num_q_vectors == 1)
4450 igc_set_itr(q_vector);
4452 igc_update_ring_itr(q_vector);
4455 if (!test_bit(__IGC_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(IGC_EIMS, q_vector->eims_value);
		else
			igc_irq_enable(adapter);
	}
}
static void igc_add_ring(struct igc_ring *ring,
			 struct igc_ring_container *head)
{
	head->ring = ring;
	head->count++;
}
4471 * igc_cache_ring_register - Descriptor ring to register mapping
4472 * @adapter: board private structure to initialize
4474 * Once we know the feature-set enabled for the device, we'll cache
4475 * the register offset the descriptor ring is assigned to.
static void igc_cache_ring_register(struct igc_adapter *adapter)
{
	int i = 0, j = 0;

	switch (adapter->hw.mac.type) {
	case igc_i225:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = j;
		break;
	}
}
4493 * igc_poll - NAPI Rx polling callback
4494 * @napi: napi polling structure
4495 * @budget: count of how many packets we should handle
4497 static int igc_poll(struct napi_struct *napi, int budget)
4499 struct igc_q_vector *q_vector = container_of(napi,
4500 struct igc_q_vector,
4502 struct igc_ring *rx_ring = q_vector->rx.ring;
4503 bool clean_complete = true;
4506 if (q_vector->tx.ring)
4507 clean_complete = igc_clean_tx_irq(q_vector, budget);
	if (rx_ring) {
		int cleaned = rx_ring->xsk_pool ?
4511 igc_clean_rx_irq_zc(q_vector, budget) :
4512 igc_clean_rx_irq(q_vector, budget);
4514 work_done += cleaned;
4515 if (cleaned >= budget)
			clean_complete = false;
	}
4519 /* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
4523 /* Exit the polling mode, but don't re-enable interrupts if stack might
4524 * poll us due to busy-polling
4526 if (likely(napi_complete_done(napi, work_done)))
4527 igc_ring_irq_enable(q_vector);
	return min(work_done, budget - 1);
}
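/* Returning min(work_done, budget - 1) keeps the full-budget return
 * value reserved for the not-done case: NAPI interprets a return equal
 * to budget as "more work pending, keep polling", so a completed poll
 * must report strictly less than budget.
 */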
4533 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
4534 * @adapter: board private structure to initialize
4535 * @v_count: q_vectors allocated on adapter, used for ring interleaving
4536 * @v_idx: index of vector in adapter struct
4537 * @txr_count: total number of Tx rings to allocate
4538 * @txr_idx: index of first Tx ring to allocate
4539 * @rxr_count: total number of Rx rings to allocate
4540 * @rxr_idx: index of first Rx ring to allocate
4542 * We allocate one q_vector. If allocation fails we return -ENOMEM.
4544 static int igc_alloc_q_vector(struct igc_adapter *adapter,
4545 unsigned int v_count, unsigned int v_idx,
4546 unsigned int txr_count, unsigned int txr_idx,
4547 unsigned int rxr_count, unsigned int rxr_idx)
4549 struct igc_q_vector *q_vector;
	struct igc_ring *ring;
	int ring_count;
4553 /* igc only supports 1 Tx and/or 1 Rx queue per vector */
4554 if (txr_count > 1 || rxr_count > 1)
	ring_count = txr_count + rxr_count;

	/* allocate q_vector and rings */
	q_vector = adapter->q_vector[v_idx];
	if (!q_vector)
		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
				   GFP_KERNEL);
	else
		memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
	if (!q_vector)
		return -ENOMEM;
4569 /* initialize NAPI */
4570 netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll);
4572 /* tie q_vector and adapter together */
4573 adapter->q_vector[v_idx] = q_vector;
4574 q_vector->adapter = adapter;
4576 /* initialize work limits */
4577 q_vector->tx.work_limit = adapter->tx_work_limit;
4579 /* initialize ITR configuration */
4580 q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
4581 q_vector->itr_val = IGC_START_ITR;
4583 /* initialize pointer to rings */
4584 ring = q_vector->ring;
	/* initialize ITR */
	if (rxr_count) {
		/* rx or rx/tx vector */
		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
			q_vector->itr_val = adapter->rx_itr_setting;
	} else {
		/* tx only vector */
		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
			q_vector->itr_val = adapter->tx_itr_setting;
	}
	if (txr_count) {
		/* assign generic ring traits */
4599 ring->dev = &adapter->pdev->dev;
4600 ring->netdev = adapter->netdev;
4602 /* configure backlink on ring */
4603 ring->q_vector = q_vector;
4605 /* update q_vector Tx values */
4606 igc_add_ring(ring, &q_vector->tx);
4608 /* apply Tx specific ring traits */
4609 ring->count = adapter->tx_ring_count;
4610 ring->queue_index = txr_idx;
4612 /* assign ring to adapter */
4613 adapter->tx_ring[txr_idx] = ring;
		/* push pointer to next ring */
		ring++;
	}

	if (rxr_count) {
4620 /* assign generic ring traits */
4621 ring->dev = &adapter->pdev->dev;
4622 ring->netdev = adapter->netdev;
4624 /* configure backlink on ring */
4625 ring->q_vector = q_vector;
4627 /* update q_vector Rx values */
4628 igc_add_ring(ring, &q_vector->rx);
4630 /* apply Rx specific ring traits */
4631 ring->count = adapter->rx_ring_count;
4632 ring->queue_index = rxr_idx;
		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;
	}

	return 0;
}
4642 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
4643 * @adapter: board private structure to initialize
4645 * We allocate one q_vector per queue interrupt. If allocation fails we
4648 static int igc_alloc_q_vectors(struct igc_adapter *adapter)
4650 int rxr_remaining = adapter->num_rx_queues;
4651 int txr_remaining = adapter->num_tx_queues;
4652 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
4653 int q_vectors = adapter->num_q_vectors;
4656 if (q_vectors >= (rxr_remaining + txr_remaining)) {
4657 for (; rxr_remaining; v_idx++) {
4658 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
4659 0, 0, 1, rxr_idx);
4664 /* update counts and index */
4670 for (; v_idx < q_vectors; v_idx++) {
4671 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
4672 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
4674 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
4675 tqpv, txr_idx, rqpv, rxr_idx);
4680 /* update counts and index */
4681 rxr_remaining -= rqpv;
4682 txr_remaining -= tqpv;
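/* Distribution example (hypothetical counts): with 4 Tx and 4 Rx rings on
 * 4 combined vectors, each pass computes rqpv = tqpv =
 * DIV_ROUND_UP(remaining, vectors_left) = 1, so every vector receives
 * exactly one Tx and one Rx ring — which also respects the
 * one-Tx/one-Rx-per-vector limit enforced in igc_alloc_q_vector().
 */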
4690 adapter->num_tx_queues = 0;
4691 adapter->num_rx_queues = 0;
4692 adapter->num_q_vectors = 0;
4695 igc_free_q_vector(adapter, v_idx);
4701 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
4702 * @adapter: Pointer to adapter structure
4703 * @msix: boolean for MSI-X capability
4705 * This function initializes the interrupts and allocates all of the queues.
4707 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
4709 struct net_device *dev = adapter->netdev;
4712 igc_set_interrupt_capability(adapter, msix);
4714 err = igc_alloc_q_vectors(adapter);
4716 netdev_err(dev, "Unable to allocate memory for vectors\n");
4717 goto err_alloc_q_vectors;
4720 igc_cache_ring_register(adapter);
4724 err_alloc_q_vectors:
4725 igc_reset_interrupt_capability(adapter);
4730 * igc_sw_init - Initialize general software structures (struct igc_adapter)
4731 * @adapter: board private structure to initialize
4733 * igc_sw_init initializes the Adapter private data structure.
4734 * Fields are initialized based on PCI device information and
4735 * OS network device settings (MTU size).
4737 static int igc_sw_init(struct igc_adapter *adapter)
4739 struct net_device *netdev = adapter->netdev;
4740 struct pci_dev *pdev = adapter->pdev;
4741 struct igc_hw *hw = &adapter->hw;
4743 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
4745 /* set default ring sizes */
4746 adapter->tx_ring_count = IGC_DEFAULT_TXD;
4747 adapter->rx_ring_count = IGC_DEFAULT_RXD;
4749 /* set default ITR values */
4750 adapter->rx_itr_setting = IGC_DEFAULT_ITR;
4751 adapter->tx_itr_setting = IGC_DEFAULT_ITR;
4753 /* set default work limits */
4754 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
4756 /* adjust max frame to be at least the size of a standard frame */
4757 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
4758 VLAN_HLEN;
4759 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
4761 mutex_init(&adapter->nfc_rule_lock);
4762 INIT_LIST_HEAD(&adapter->nfc_rule_list);
4763 adapter->nfc_rule_count = 0;
4765 spin_lock_init(&adapter->stats64_lock);
4766 /* Assume MSI-X interrupts, will be checked during IRQ allocation */
4767 adapter->flags |= IGC_FLAG_HAS_MSIX;
4769 igc_init_queue_configuration(adapter);
4771 /* This call may decrease the number of queues */
4772 if (igc_init_interrupt_scheme(adapter, true)) {
4773 netdev_err(netdev, "Unable to allocate memory for queues\n");
4777 /* Explicitly disable IRQ since the NIC can be in any state. */
4778 igc_irq_disable(adapter);
4780 set_bit(__IGC_DOWN, &adapter->state);
4786 * igc_up - Open the interface and prepare it to handle traffic
4787 * @adapter: board private structure
4789 void igc_up(struct igc_adapter *adapter)
4791 struct igc_hw *hw = &adapter->hw;
4794 /* hardware has been reset, we need to reload some things */
4795 igc_configure(adapter);
4797 clear_bit(__IGC_DOWN, &adapter->state);
4799 for (i = 0; i < adapter->num_q_vectors; i++)
4800 napi_enable(&adapter->q_vector[i]->napi);
4802 if (adapter->msix_entries)
4803 igc_configure_msix(adapter);
4805 igc_assign_vector(adapter->q_vector[0], 0);
4807 /* Clear any pending interrupts. */
4808 rd32(IGC_ICR);
4809 igc_irq_enable(adapter);
4811 netif_tx_start_all_queues(adapter->netdev);
4813 /* start the watchdog. */
4814 hw->mac.get_link_status = true;
4815 schedule_work(&adapter->watchdog_task);
4819 * igc_update_stats - Update the board statistics counters
4820 * @adapter: board private structure
4822 void igc_update_stats(struct igc_adapter *adapter)
4824 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
4825 struct pci_dev *pdev = adapter->pdev;
4826 struct igc_hw *hw = &adapter->hw;
4827 u64 _bytes, _packets;
4833 /* Prevent stats update while adapter is being reset, or if the pci
4834 * connection is down.
4836 if (adapter->link_speed == 0)
4837 return;
4838 if (pci_channel_offline(pdev))
4839 return;
4845 for (i = 0; i < adapter->num_rx_queues; i++) {
4846 struct igc_ring *ring = adapter->rx_ring[i];
4847 u32 rqdpc = rd32(IGC_RQDPC(i));
4849 if (hw->mac.type >= igc_i225)
4850 wr32(IGC_RQDPC(i), 0);
4853 ring->rx_stats.drops += rqdpc;
4854 net_stats->rx_fifo_errors += rqdpc;
4858 start = u64_stats_fetch_begin(&ring->rx_syncp);
4859 _bytes = ring->rx_stats.bytes;
4860 _packets = ring->rx_stats.packets;
4861 } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
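/* Reader-side pattern (generic sketch, hypothetical names): the
 * u64_stats_fetch_begin()/u64_stats_fetch_retry() pair snapshots a 64-bit
 * counter pair consistently even on 32-bit kernels, without blocking the
 * writer:
 *
 *	unsigned int seq;
 *	do {
 *		seq = u64_stats_fetch_begin(&s->syncp);
 *		pkts  = s->packets;
 *		bytes = s->bytes;
 *	} while (u64_stats_fetch_retry(&s->syncp, seq));
 */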
4863 packets += _packets;
4866 net_stats->rx_bytes = bytes;
4867 net_stats->rx_packets = packets;
4871 for (i = 0; i < adapter->num_tx_queues; i++) {
4872 struct igc_ring *ring = adapter->tx_ring[i];
4875 start = u64_stats_fetch_begin(&ring->tx_syncp);
4876 _bytes = ring->tx_stats.bytes;
4877 _packets = ring->tx_stats.packets;
4878 } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
4880 packets += _packets;
4882 net_stats->tx_bytes = bytes;
4883 net_stats->tx_packets = packets;
4886 /* read stats registers */
4887 adapter->stats.crcerrs += rd32(IGC_CRCERRS);
4888 adapter->stats.gprc += rd32(IGC_GPRC);
4889 adapter->stats.gorc += rd32(IGC_GORCL);
4890 rd32(IGC_GORCH); /* clear GORCL */
4891 adapter->stats.bprc += rd32(IGC_BPRC);
4892 adapter->stats.mprc += rd32(IGC_MPRC);
4893 adapter->stats.roc += rd32(IGC_ROC);
4895 adapter->stats.prc64 += rd32(IGC_PRC64);
4896 adapter->stats.prc127 += rd32(IGC_PRC127);
4897 adapter->stats.prc255 += rd32(IGC_PRC255);
4898 adapter->stats.prc511 += rd32(IGC_PRC511);
4899 adapter->stats.prc1023 += rd32(IGC_PRC1023);
4900 adapter->stats.prc1522 += rd32(IGC_PRC1522);
4901 adapter->stats.tlpic += rd32(IGC_TLPIC);
4902 adapter->stats.rlpic += rd32(IGC_RLPIC);
4903 adapter->stats.hgptc += rd32(IGC_HGPTC);
4905 mpc = rd32(IGC_MPC);
4906 adapter->stats.mpc += mpc;
4907 net_stats->rx_fifo_errors += mpc;
4908 adapter->stats.scc += rd32(IGC_SCC);
4909 adapter->stats.ecol += rd32(IGC_ECOL);
4910 adapter->stats.mcc += rd32(IGC_MCC);
4911 adapter->stats.latecol += rd32(IGC_LATECOL);
4912 adapter->stats.dc += rd32(IGC_DC);
4913 adapter->stats.rlec += rd32(IGC_RLEC);
4914 adapter->stats.xonrxc += rd32(IGC_XONRXC);
4915 adapter->stats.xontxc += rd32(IGC_XONTXC);
4916 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
4917 adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
4918 adapter->stats.fcruc += rd32(IGC_FCRUC);
4919 adapter->stats.gptc += rd32(IGC_GPTC);
4920 adapter->stats.gotc += rd32(IGC_GOTCL);
4921 rd32(IGC_GOTCH); /* clear GOTCL */
4922 adapter->stats.rnbc += rd32(IGC_RNBC);
4923 adapter->stats.ruc += rd32(IGC_RUC);
4924 adapter->stats.rfc += rd32(IGC_RFC);
4925 adapter->stats.rjc += rd32(IGC_RJC);
4926 adapter->stats.tor += rd32(IGC_TORH);
4927 adapter->stats.tot += rd32(IGC_TOTH);
4928 adapter->stats.tpr += rd32(IGC_TPR);
4930 adapter->stats.ptc64 += rd32(IGC_PTC64);
4931 adapter->stats.ptc127 += rd32(IGC_PTC127);
4932 adapter->stats.ptc255 += rd32(IGC_PTC255);
4933 adapter->stats.ptc511 += rd32(IGC_PTC511);
4934 adapter->stats.ptc1023 += rd32(IGC_PTC1023);
4935 adapter->stats.ptc1522 += rd32(IGC_PTC1522);
4937 adapter->stats.mptc += rd32(IGC_MPTC);
4938 adapter->stats.bptc += rd32(IGC_BPTC);
4940 adapter->stats.tpt += rd32(IGC_TPT);
4941 adapter->stats.colc += rd32(IGC_COLC);
4942 adapter->stats.colc += rd32(IGC_RERC);
4944 adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
4946 adapter->stats.tsctc += rd32(IGC_TSCTC);
4948 adapter->stats.iac += rd32(IGC_IAC);
4950 /* Fill out the OS statistics structure */
4951 net_stats->multicast = adapter->stats.mprc;
4952 net_stats->collisions = adapter->stats.colc;
4956 /* RLEC on some newer hardware can be incorrect so build
4957 * our own version based on RUC and ROC
4959 net_stats->rx_errors = adapter->stats.rxerrc +
4960 adapter->stats.crcerrs + adapter->stats.algnerrc +
4961 adapter->stats.ruc + adapter->stats.roc +
4962 adapter->stats.cexterr;
4963 net_stats->rx_length_errors = adapter->stats.ruc +
4965 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4966 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4967 net_stats->rx_missed_errors = adapter->stats.mpc;
4970 net_stats->tx_errors = adapter->stats.ecol +
4971 adapter->stats.latecol;
4972 net_stats->tx_aborted_errors = adapter->stats.ecol;
4973 net_stats->tx_window_errors = adapter->stats.latecol;
4974 net_stats->tx_carrier_errors = adapter->stats.tncrs;
4977 net_stats->tx_dropped = adapter->stats.txdrop;
4979 /* Management Stats */
4980 adapter->stats.mgptc += rd32(IGC_MGTPTC);
4981 adapter->stats.mgprc += rd32(IGC_MGTPRC);
4982 adapter->stats.mgpdc += rd32(IGC_MGTPDC);
4986 * igc_down - Close the interface
4987 * @adapter: board private structure
4989 void igc_down(struct igc_adapter *adapter)
4991 struct net_device *netdev = adapter->netdev;
4992 struct igc_hw *hw = &adapter->hw;
4996 set_bit(__IGC_DOWN, &adapter->state);
4998 igc_ptp_suspend(adapter);
5000 if (pci_device_is_present(adapter->pdev)) {
5001 /* disable receives in the hardware */
5002 rctl = rd32(IGC_RCTL);
5003 wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
5004 /* flush and sleep below */
5006 /* set trans_start so we don't get spurious watchdogs during reset */
5007 netif_trans_update(netdev);
5009 netif_carrier_off(netdev);
5010 netif_tx_stop_all_queues(netdev);
5012 if (pci_device_is_present(adapter->pdev)) {
5013 /* disable transmits in the hardware */
5014 tctl = rd32(IGC_TCTL);
5015 tctl &= ~IGC_TCTL_EN;
5016 wr32(IGC_TCTL, tctl);
5017 /* flush both disables and wait for them to finish */
5018 wrfl();
5019 usleep_range(10000, 20000);
5021 igc_irq_disable(adapter);
5024 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5026 for (i = 0; i < adapter->num_q_vectors; i++) {
5027 if (adapter->q_vector[i]) {
5028 napi_synchronize(&adapter->q_vector[i]->napi);
5029 napi_disable(&adapter->q_vector[i]->napi);
5033 del_timer_sync(&adapter->watchdog_timer);
5034 del_timer_sync(&adapter->phy_info_timer);
5036 /* record the stats before reset */
5037 spin_lock(&adapter->stats64_lock);
5038 igc_update_stats(adapter);
5039 spin_unlock(&adapter->stats64_lock);
5041 adapter->link_speed = 0;
5042 adapter->link_duplex = 0;
5044 if (!pci_channel_offline(adapter->pdev))
5045 igc_reset(adapter);
5047 /* clear VLAN promisc flag so VFTA will be updated if necessary */
5048 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
5050 igc_clean_all_tx_rings(adapter);
5051 igc_clean_all_rx_rings(adapter);
5054 void igc_reinit_locked(struct igc_adapter *adapter)
5056 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
5057 usleep_range(1000, 2000);
5058 igc_down(adapter);
5059 igc_up(adapter);
5060 clear_bit(__IGC_RESETTING, &adapter->state);
5063 static void igc_reset_task(struct work_struct *work)
5065 struct igc_adapter *adapter;
5067 adapter = container_of(work, struct igc_adapter, reset_task);
5070 /* If we're already down or resetting, just bail */
5071 if (test_bit(__IGC_DOWN, &adapter->state) ||
5072 test_bit(__IGC_RESETTING, &adapter->state)) {
5077 igc_rings_dump(adapter);
5078 igc_regs_dump(adapter);
5079 netdev_err(adapter->netdev, "Reset adapter\n");
5080 igc_reinit_locked(adapter);
5085 * igc_change_mtu - Change the Maximum Transfer Unit
5086 * @netdev: network interface device structure
5087 * @new_mtu: new value for maximum frame size
5089 * Returns 0 on success, negative on failure
5091 static int igc_change_mtu(struct net_device *netdev, int new_mtu)
5093 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
5094 struct igc_adapter *adapter = netdev_priv(netdev);
5096 if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) {
5097 netdev_dbg(netdev, "Jumbo frames not supported with XDP\n");
5101 /* adjust max frame to be at least the size of a standard frame */
5102 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
5103 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
5105 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
5106 usleep_range(1000, 2000);
5108 /* igc_down has a dependency on max_frame_size */
5109 adapter->max_frame_size = max_frame;
5111 if (netif_running(netdev))
5112 igc_down(adapter);
5114 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5115 netdev->mtu = new_mtu;
5117 if (netif_running(netdev))
5118 igc_up(adapter);
5119 else
5120 igc_reset(adapter);
5122 clear_bit(__IGC_RESETTING, &adapter->state);
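/* Frame-size arithmetic: max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN +
 * VLAN_HLEN, so the default 1500-byte MTU needs 1500 + 14 + 4 + 4 = 1522
 * bytes of buffer per frame once the FCS and a single VLAN tag are
 * accounted for.
 */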
5128 * igc_tx_timeout - Respond to a Tx Hang
5129 * @netdev: network interface device structure
5130 * @txqueue: queue number that timed out
5132 static void igc_tx_timeout(struct net_device *netdev,
5133 unsigned int __always_unused txqueue)
5135 struct igc_adapter *adapter = netdev_priv(netdev);
5136 struct igc_hw *hw = &adapter->hw;
5138 /* Do the reset outside of interrupt context */
5139 adapter->tx_timeout_count++;
5140 schedule_work(&adapter->reset_task);
5141 wr32(IGC_EICS,
5142 (adapter->eims_enable_mask & ~adapter->eims_other));
5146 * igc_get_stats64 - Get System Network Statistics
5147 * @netdev: network interface device structure
5148 * @stats: rtnl_link_stats64 pointer
5150 * Returns the address of the device statistics structure.
5151 * The statistics are updated here and also from the timer callback.
5153 static void igc_get_stats64(struct net_device *netdev,
5154 struct rtnl_link_stats64 *stats)
5156 struct igc_adapter *adapter = netdev_priv(netdev);
5158 spin_lock(&adapter->stats64_lock);
5159 if (!test_bit(__IGC_RESETTING, &adapter->state))
5160 igc_update_stats(adapter);
5161 memcpy(stats, &adapter->stats64, sizeof(*stats));
5162 spin_unlock(&adapter->stats64_lock);
5165 static netdev_features_t igc_fix_features(struct net_device *netdev,
5166 netdev_features_t features)
5168 /* Since there is no support for separate Rx/Tx vlan accel
5169 * enable/disable make sure Tx flag is always in same state as Rx.
5171 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5172 features |= NETIF_F_HW_VLAN_CTAG_TX;
5174 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
5179 static int igc_set_features(struct net_device *netdev,
5180 netdev_features_t features)
5182 netdev_features_t changed = netdev->features ^ features;
5183 struct igc_adapter *adapter = netdev_priv(netdev);
5185 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
5186 igc_vlan_mode(netdev, features);
5188 /* Add VLAN support */
5189 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
5192 if (!(features & NETIF_F_NTUPLE))
5193 igc_flush_nfc_rules(adapter);
5195 netdev->features = features;
5197 if (netif_running(netdev))
5198 igc_reinit_locked(adapter);
5205 static netdev_features_t
5206 igc_features_check(struct sk_buff *skb, struct net_device *dev,
5207 netdev_features_t features)
5209 unsigned int network_hdr_len, mac_hdr_len;
5211 /* Make certain the headers can be described by a context descriptor */
5212 mac_hdr_len = skb_network_header(skb) - skb->data;
5213 if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
5214 return features & ~(NETIF_F_HW_CSUM |
5216 NETIF_F_HW_VLAN_CTAG_TX |
5220 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
5221 if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
5222 return features & ~(NETIF_F_HW_CSUM |
5227 /* We can only support IPv4 TSO in tunnels if we can mangle the
5228 * inner IP ID field, so strip TSO if MANGLEID is not supported.
5230 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
5231 features &= ~NETIF_F_TSO;
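/* Header-limit example: a single-tagged VLAN frame has a MAC header of
 * ETH_HLEN + VLAN_HLEN = 14 + 4 = 18 bytes, which a context descriptor can
 * describe; deeper encapsulations that push the MAC or network header past
 * IGC_MAX_MAC_HDR_LEN / IGC_MAX_NETWORK_HDR_LEN take the early returns
 * above and fall back to software checksumming and GSO.
 */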
5236 static void igc_tsync_interrupt(struct igc_adapter *adapter)
5238 u32 ack, tsauxc, sec, nsec, tsicr;
5239 struct igc_hw *hw = &adapter->hw;
5240 struct ptp_clock_event event;
5241 struct timespec64 ts;
5243 tsicr = rd32(IGC_TSICR);
5244 ack = 0;
5246 if (tsicr & IGC_TSICR_SYS_WRAP) {
5247 event.type = PTP_CLOCK_PPS;
5248 if (adapter->ptp_caps.pps)
5249 ptp_clock_event(adapter->ptp_clock, &event);
5250 ack |= IGC_TSICR_SYS_WRAP;
5253 if (tsicr & IGC_TSICR_TXTS) {
5254 /* retrieve hardware timestamp */
5255 schedule_work(&adapter->ptp_tx_work);
5256 ack |= IGC_TSICR_TXTS;
5259 if (tsicr & IGC_TSICR_TT0) {
5260 spin_lock(&adapter->tmreg_lock);
5261 ts = timespec64_add(adapter->perout[0].start,
5262 adapter->perout[0].period);
5263 wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
5264 wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec);
5265 tsauxc = rd32(IGC_TSAUXC);
5266 tsauxc |= IGC_TSAUXC_EN_TT0;
5267 wr32(IGC_TSAUXC, tsauxc);
5268 adapter->perout[0].start = ts;
5269 spin_unlock(&adapter->tmreg_lock);
5270 ack |= IGC_TSICR_TT0;
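/* Re-arm arithmetic (hypothetical timings): each target-time interrupt
 * schedules the next output edge at start += period. With start = 10.0 s
 * and a 500 ms period, successive interrupts program 10.5 s, 11.0 s, ...
 * into IGC_TRGTTIML0/IGC_TRGTTIMH0, so the periodic output keeps toggling
 * without software tracking absolute edge counts.
 */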
5273 if (tsicr & IGC_TSICR_TT1) {
5274 spin_lock(&adapter->tmreg_lock);
5275 ts = timespec64_add(adapter->perout[1].start,
5276 adapter->perout[1].period);
5277 wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
5278 wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec);
5279 tsauxc = rd32(IGC_TSAUXC);
5280 tsauxc |= IGC_TSAUXC_EN_TT1;
5281 wr32(IGC_TSAUXC, tsauxc);
5282 adapter->perout[1].start = ts;
5283 spin_unlock(&adapter->tmreg_lock);
5284 ack |= IGC_TSICR_TT1;
5287 if (tsicr & IGC_TSICR_AUTT0) {
5288 nsec = rd32(IGC_AUXSTMPL0);
5289 sec = rd32(IGC_AUXSTMPH0);
5290 event.type = PTP_CLOCK_EXTTS;
5292 event.timestamp = sec * NSEC_PER_SEC + nsec;
5293 ptp_clock_event(adapter->ptp_clock, &event);
5294 ack |= IGC_TSICR_AUTT0;
5297 if (tsicr & IGC_TSICR_AUTT1) {
5298 nsec = rd32(IGC_AUXSTMPL1);
5299 sec = rd32(IGC_AUXSTMPH1);
5300 event.type = PTP_CLOCK_EXTTS;
5302 event.timestamp = sec * NSEC_PER_SEC + nsec;
5303 ptp_clock_event(adapter->ptp_clock, &event);
5304 ack |= IGC_TSICR_AUTT1;
5307 /* acknowledge the interrupts */
5308 wr32(IGC_TSICR, ack);
5312 * igc_msix_other - msix other interrupt handler
5313 * @irq: interrupt number
5314 * @data: pointer to the adapter private structure
5315 */
5316 static irqreturn_t igc_msix_other(int irq, void *data)
5318 struct igc_adapter *adapter = data;
5319 struct igc_hw *hw = &adapter->hw;
5320 u32 icr = rd32(IGC_ICR);
5322 /* reading ICR causes bit 31 of EICR to be cleared */
5323 if (icr & IGC_ICR_DRSTA)
5324 schedule_work(&adapter->reset_task);
5326 if (icr & IGC_ICR_DOUTSYNC) {
5327 /* HW is reporting DMA is out of sync */
5328 adapter->stats.doosync++;
5331 if (icr & IGC_ICR_LSC) {
5332 hw->mac.get_link_status = true;
5333 /* guard against interrupt when we're going down */
5334 if (!test_bit(__IGC_DOWN, &adapter->state))
5335 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5338 if (icr & IGC_ICR_TS)
5339 igc_tsync_interrupt(adapter);
5341 wr32(IGC_EIMS, adapter->eims_other);
5346 static void igc_write_itr(struct igc_q_vector *q_vector)
5348 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
5350 if (!q_vector->set_itr)
5354 itr_val = IGC_ITR_VAL_MASK;
5356 itr_val |= IGC_EITR_CNT_IGNR;
5358 writel(itr_val, q_vector->itr_register);
5359 q_vector->set_itr = 0;
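/* Encoding note: itr_val is a hardware interval, not an interrupts/sec
 * figure; the driver's IGC_START_ITR default (648) corresponds to roughly
 * 6000 interrupts per second. Setting IGC_EITR_CNT_IGNR is assumed here to
 * keep the hardware from resetting its in-progress interval counter when
 * EITR is rewritten, so an ITR change takes effect without a spurious
 * immediate interrupt.
 */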
5362 static irqreturn_t igc_msix_ring(int irq, void *data)
5364 struct igc_q_vector *q_vector = data;
5366 /* Write the ITR value calculated from the previous interrupt. */
5367 igc_write_itr(q_vector);
5369 napi_schedule(&q_vector->napi);
5375 * igc_request_msix - Initialize MSI-X interrupts
5376 * @adapter: Pointer to adapter structure
5378 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
5379 * kernel.
5380 */
5381 static int igc_request_msix(struct igc_adapter *adapter)
5383 unsigned int num_q_vectors = adapter->num_q_vectors;
5384 int i = 0, err = 0, vector = 0, free_vector = 0;
5385 struct net_device *netdev = adapter->netdev;
5387 err = request_irq(adapter->msix_entries[vector].vector,
5388 &igc_msix_other, 0, netdev->name, adapter);
5392 if (num_q_vectors > MAX_Q_VECTORS) {
5393 num_q_vectors = MAX_Q_VECTORS;
5394 dev_warn(&adapter->pdev->dev,
5395 "The number of queue vectors (%d) is higher than max allowed (%d)\n",
5396 adapter->num_q_vectors, MAX_Q_VECTORS);
5398 for (i = 0; i < num_q_vectors; i++) {
5399 struct igc_q_vector *q_vector = adapter->q_vector[i];
5403 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
5405 if (q_vector->rx.ring && q_vector->tx.ring)
5406 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
5407 q_vector->rx.ring->queue_index);
5408 else if (q_vector->tx.ring)
5409 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
5410 q_vector->tx.ring->queue_index);
5411 else if (q_vector->rx.ring)
5412 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
5413 q_vector->rx.ring->queue_index);
5415 sprintf(q_vector->name, "%s-unused", netdev->name);
5417 err = request_irq(adapter->msix_entries[vector].vector,
5418 igc_msix_ring, 0, q_vector->name,
5424 igc_configure_msix(adapter);
5428 /* free already assigned IRQs */
5429 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
5432 for (i = 0; i < vector; i++) {
5433 free_irq(adapter->msix_entries[free_vector++].vector,
5434 adapter->q_vector[i]);
5441 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
5442 * @adapter: Pointer to adapter structure
5444 * This function resets the device so that it has 0 rx queues, tx queues, and
5445 * MSI-X interrupts allocated.
5447 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
5449 igc_free_q_vectors(adapter);
5450 igc_reset_interrupt_capability(adapter);
5453 /* Need to wait a few seconds after link up to get diagnostic information from
5454 * the phy
5455 */
5456 static void igc_update_phy_info(struct timer_list *t)
5458 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
5460 igc_get_phy_info(&adapter->hw);
5464 * igc_has_link - check shared code for link and determine up/down
5465 * @adapter: pointer to driver private info
5467 bool igc_has_link(struct igc_adapter *adapter)
5469 struct igc_hw *hw = &adapter->hw;
5470 bool link_active = false;
5472 /* get_link_status is set on LSC (link status) interrupt or
5473 * rx sequence error interrupt. get_link_status will stay
5474 * false until the igc_check_for_link establishes link
5475 * for copper adapters ONLY
5477 if (!hw->mac.get_link_status)
5479 hw->mac.ops.check_for_link(hw);
5480 link_active = !hw->mac.get_link_status;
5482 if (hw->mac.type == igc_i225) {
5483 if (!netif_carrier_ok(adapter->netdev)) {
5484 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5485 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
5486 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
5487 adapter->link_check_timeout = jiffies;
5495 * igc_watchdog - Timer Call-back
5496 * @t: timer for the watchdog
5498 static void igc_watchdog(struct timer_list *t)
5500 struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5501 /* Do the rest outside of interrupt context */
5502 schedule_work(&adapter->watchdog_task);
5505 static void igc_watchdog_task(struct work_struct *work)
5507 struct igc_adapter *adapter = container_of(work,
5510 struct net_device *netdev = adapter->netdev;
5511 struct igc_hw *hw = &adapter->hw;
5512 struct igc_phy_info *phy = &hw->phy;
5513 u16 phy_data, retry_count = 20;
5517 link = igc_has_link(adapter);
5519 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
5520 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5521 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5527 /* Cancel scheduled suspend requests. */
5528 pm_runtime_resume(netdev->dev.parent);
5530 if (!netif_carrier_ok(netdev)) {
5533 hw->mac.ops.get_speed_and_duplex(hw,
5534 &adapter->link_speed,
5535 &adapter->link_duplex);
5537 ctrl = rd32(IGC_CTRL);
5538 /* Link status message must follow this format */
5540 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5541 adapter->link_speed,
5542 adapter->link_duplex == FULL_DUPLEX ?
5543 "Full" : "Half",
5544 (ctrl & IGC_CTRL_TFCE) &&
5545 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
5546 (ctrl & IGC_CTRL_RFCE) ? "RX" :
5547 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None");
5549 /* disable EEE if enabled */
5550 if ((adapter->flags & IGC_FLAG_EEE) &&
5551 adapter->link_duplex == HALF_DUPLEX) {
5553 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
5554 adapter->hw.dev_spec._base.eee_enable = false;
5555 adapter->flags &= ~IGC_FLAG_EEE;
5558 /* check if SmartSpeed worked */
5559 igc_check_downshift(hw);
5560 if (phy->speed_downgraded)
5561 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5563 /* adjust timeout factor according to speed/duplex */
5564 adapter->tx_timeout_factor = 1;
5565 switch (adapter->link_speed) {
5566 case SPEED_10:
5567 adapter->tx_timeout_factor = 14;
5568 break;
5569 case SPEED_100:
5570 case SPEED_1000:
5571 case SPEED_2500:
5572 adapter->tx_timeout_factor = 1;
5573 break;
5574 }
5576 /* Once the launch time has been set on the wire, there
5577 * is a delay before the link speed can be determined
5578 * based on link-up activity. Write into the register
5579 * as soon as we know the correct link speed.
5581 igc_tsn_adjust_txtime_offset(adapter);
5583 if (adapter->link_speed != SPEED_1000)
5586 /* wait for Remote receiver status OK */
5588 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
5590 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5594 goto retry_read_status;
5595 } else if (!retry_count) {
5596 netdev_err(netdev, "exceeded max 2 seconds\n");
5599 netdev_err(netdev, "failed to read 1000Base-T Status Reg\n");
5602 netif_carrier_on(netdev);
5604 /* link state has changed, schedule phy info update */
5605 if (!test_bit(__IGC_DOWN, &adapter->state))
5606 mod_timer(&adapter->phy_info_timer,
5607 round_jiffies(jiffies + 2 * HZ));
5610 if (netif_carrier_ok(netdev)) {
5611 adapter->link_speed = 0;
5612 adapter->link_duplex = 0;
5614 /* Link status message must follow this format */
5615 netdev_info(netdev, "NIC Link is Down\n");
5616 netif_carrier_off(netdev);
5618 /* link state has changed, schedule phy info update */
5619 if (!test_bit(__IGC_DOWN, &adapter->state))
5620 mod_timer(&adapter->phy_info_timer,
5621 round_jiffies(jiffies + 2 * HZ));
5623 pm_schedule_suspend(netdev->dev.parent,
5628 spin_lock(&adapter->stats64_lock);
5629 igc_update_stats(adapter);
5630 spin_unlock(&adapter->stats64_lock);
5632 for (i = 0; i < adapter->num_tx_queues; i++) {
5633 struct igc_ring *tx_ring = adapter->tx_ring[i];
5635 if (!netif_carrier_ok(netdev)) {
5636 /* We've lost link, so the controller stops DMA,
5637 * but we've got queued Tx work that's never going
5638 * to get done, so reset controller to flush Tx.
5639 * (Do the reset outside of interrupt context).
5641 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
5642 adapter->tx_timeout_count++;
5643 schedule_work(&adapter->reset_task);
5644 /* return immediately since reset is imminent */
5649 /* Force detection of hung controller every watchdog period */
5650 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5653 /* Cause software interrupt to ensure Rx ring is cleaned */
5654 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5657 for (i = 0; i < adapter->num_q_vectors; i++)
5658 eics |= adapter->q_vector[i]->eims_value;
5659 wr32(IGC_EICS, eics);
5661 wr32(IGC_ICS, IGC_ICS_RXDMT0);
5664 igc_ptp_tx_hang(adapter);
5666 /* Reset the timer */
5667 if (!test_bit(__IGC_DOWN, &adapter->state)) {
5668 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
5669 mod_timer(&adapter->watchdog_timer,
5670 round_jiffies(jiffies + HZ));
5672 mod_timer(&adapter->watchdog_timer,
5673 round_jiffies(jiffies + 2 * HZ));
5678 * igc_intr_msi - Interrupt Handler
5679 * @irq: interrupt number
5680 * @data: pointer to the adapter private structure
5681 */
5682 static irqreturn_t igc_intr_msi(int irq, void *data)
5684 struct igc_adapter *adapter = data;
5685 struct igc_q_vector *q_vector = adapter->q_vector[0];
5686 struct igc_hw *hw = &adapter->hw;
5687 /* read ICR disables interrupts using IAM */
5688 u32 icr = rd32(IGC_ICR);
5690 igc_write_itr(q_vector);
5692 if (icr & IGC_ICR_DRSTA)
5693 schedule_work(&adapter->reset_task);
5695 if (icr & IGC_ICR_DOUTSYNC) {
5696 /* HW is reporting DMA is out of sync */
5697 adapter->stats.doosync++;
5700 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
5701 hw->mac.get_link_status = true;
5702 if (!test_bit(__IGC_DOWN, &adapter->state))
5703 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5706 if (icr & IGC_ICR_TS)
5707 igc_tsync_interrupt(adapter);
5709 napi_schedule(&q_vector->napi);
5715 * igc_intr - Legacy Interrupt Handler
5716 * @irq: interrupt number
5717 * @data: pointer to the adapter private structure
5718 */
5719 static irqreturn_t igc_intr(int irq, void *data)
5721 struct igc_adapter *adapter = data;
5722 struct igc_q_vector *q_vector = adapter->q_vector[0];
5723 struct igc_hw *hw = &adapter->hw;
5724 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5725 * need for the IMC write
5727 u32 icr = rd32(IGC_ICR);
5729 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5730 * not set, then the adapter didn't send an interrupt
5732 if (!(icr & IGC_ICR_INT_ASSERTED))
5735 igc_write_itr(q_vector);
5737 if (icr & IGC_ICR_DRSTA)
5738 schedule_work(&adapter->reset_task);
5740 if (icr & IGC_ICR_DOUTSYNC) {
5741 /* HW is reporting DMA is out of sync */
5742 adapter->stats.doosync++;
5745 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
5746 hw->mac.get_link_status = true;
5747 /* guard against interrupt when we're going down */
5748 if (!test_bit(__IGC_DOWN, &adapter->state))
5749 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5752 if (icr & IGC_ICR_TS)
5753 igc_tsync_interrupt(adapter);
5755 napi_schedule(&q_vector->napi);
5760 static void igc_free_irq(struct igc_adapter *adapter)
5762 if (adapter->msix_entries) {
5765 free_irq(adapter->msix_entries[vector++].vector, adapter);
5767 for (i = 0; i < adapter->num_q_vectors; i++)
5768 free_irq(adapter->msix_entries[vector++].vector,
5769 adapter->q_vector[i]);
5771 free_irq(adapter->pdev->irq, adapter);
5776 * igc_request_irq - initialize interrupts
5777 * @adapter: Pointer to adapter structure
5779 * Attempts to configure interrupts using the best available
5780 * capabilities of the hardware and kernel.
5782 static int igc_request_irq(struct igc_adapter *adapter)
5784 struct net_device *netdev = adapter->netdev;
5785 struct pci_dev *pdev = adapter->pdev;
5788 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5789 err = igc_request_msix(adapter);
5792 /* fall back to MSI */
5793 igc_free_all_tx_resources(adapter);
5794 igc_free_all_rx_resources(adapter);
5796 igc_clear_interrupt_scheme(adapter);
5797 err = igc_init_interrupt_scheme(adapter, false);
5800 igc_setup_all_tx_resources(adapter);
5801 igc_setup_all_rx_resources(adapter);
5802 igc_configure(adapter);
5805 igc_assign_vector(adapter->q_vector[0], 0);
5807 if (adapter->flags & IGC_FLAG_HAS_MSI) {
5808 err = request_irq(pdev->irq, &igc_intr_msi, 0,
5809 netdev->name, adapter);
5813 /* fall back to legacy interrupts */
5814 igc_reset_interrupt_capability(adapter);
5815 adapter->flags &= ~IGC_FLAG_HAS_MSI;
5818 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
5819 netdev->name, adapter);
5822 netdev_err(netdev, "Error %d getting interrupt\n", err);
5829 * __igc_open - Called when a network interface is made active
5830 * @netdev: network interface device structure
5831 * @resuming: boolean indicating if the device is resuming
5833 * Returns 0 on success, negative value on failure
5835 * The open entry point is called when a network interface is made
5836 * active by the system (IFF_UP). At this point all resources needed
5837 * for transmit and receive operations are allocated, the interrupt
5838 * handler is registered with the OS, the watchdog timer is started,
5839 * and the stack is notified that the interface is ready.
5841 static int __igc_open(struct net_device *netdev, bool resuming)
5843 struct igc_adapter *adapter = netdev_priv(netdev);
5844 struct pci_dev *pdev = adapter->pdev;
5845 struct igc_hw *hw = &adapter->hw;
5849 /* disallow open during test */
5851 if (test_bit(__IGC_TESTING, &adapter->state)) {
5857 pm_runtime_get_sync(&pdev->dev);
5859 netif_carrier_off(netdev);
5861 /* allocate transmit descriptors */
5862 err = igc_setup_all_tx_resources(adapter);
5866 /* allocate receive descriptors */
5867 err = igc_setup_all_rx_resources(adapter);
5871 igc_power_up_link(adapter);
5873 igc_configure(adapter);
5875 err = igc_request_irq(adapter);
5879 /* Notify the stack of the actual queue counts. */
5880 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
5882 goto err_set_queues;
5884 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
5886 goto err_set_queues;
5888 clear_bit(__IGC_DOWN, &adapter->state);
5890 for (i = 0; i < adapter->num_q_vectors; i++)
5891 napi_enable(&adapter->q_vector[i]->napi);
5893 /* Clear any pending interrupts. */
5894 rd32(IGC_ICR);
5895 igc_irq_enable(adapter);
5898 pm_runtime_put(&pdev->dev);
5900 netif_tx_start_all_queues(netdev);
5902 /* start the watchdog. */
5903 hw->mac.get_link_status = true;
5904 schedule_work(&adapter->watchdog_task);
5909 igc_free_irq(adapter);
5911 igc_release_hw_control(adapter);
5912 igc_power_down_phy_copper_base(&adapter->hw);
5913 igc_free_all_rx_resources(adapter);
5915 igc_free_all_tx_resources(adapter);
5919 pm_runtime_put(&pdev->dev);
5924 int igc_open(struct net_device *netdev)
5926 return __igc_open(netdev, false);
5930 * __igc_close - Disables a network interface
5931 * @netdev: network interface device structure
5932 * @suspending: boolean indicating the device is suspending
5934 * Returns 0, this is not allowed to fail
5936 * The close entry point is called when an interface is de-activated
5937 * by the OS. The hardware is still under the driver's control, but
5938 * needs to be disabled. A global MAC reset is issued to stop the
5939 * hardware, and all transmit and receive resources are freed.
5941 static int __igc_close(struct net_device *netdev, bool suspending)
5943 struct igc_adapter *adapter = netdev_priv(netdev);
5944 struct pci_dev *pdev = adapter->pdev;
5946 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
5949 pm_runtime_get_sync(&pdev->dev);
5953 igc_release_hw_control(adapter);
5955 igc_free_irq(adapter);
5957 igc_free_all_tx_resources(adapter);
5958 igc_free_all_rx_resources(adapter);
5961 pm_runtime_put_sync(&pdev->dev);
5966 int igc_close(struct net_device *netdev)
5968 if (netif_device_present(netdev) || netdev->dismantle)
5969 return __igc_close(netdev, false);
5974 * igc_ioctl - Access the hwtstamp interface
5975 * @netdev: network interface device structure
5976 * @ifr: interface request data
5977 * @cmd: ioctl command
5979 static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5981 switch (cmd) {
5982 case SIOCGHWTSTAMP:
5983 return igc_ptp_get_ts_config(netdev, ifr);
5984 case SIOCSHWTSTAMP:
5985 return igc_ptp_set_ts_config(netdev, ifr);
5986 default:
5987 return -EOPNOTSUPP;
5991 static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
5994 struct igc_ring *ring;
5996 if (queue < 0 || queue >= adapter->num_tx_queues)
5999 ring = adapter->tx_ring[queue];
6000 ring->launchtime_enable = enable;
6005 static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
6007 struct timespec64 b;
6009 b = ktime_to_timespec64(base_time);
6011 return timespec64_compare(now, &b) > 0;
6014 static bool validate_schedule(struct igc_adapter *adapter,
6015 const struct tc_taprio_qopt_offload *qopt)
6017 int queue_uses[IGC_MAX_TX_QUEUES] = { };
6018 struct igc_hw *hw = &adapter->hw;
6019 struct timespec64 now;
6022 if (qopt->cycle_time_extension)
6025 igc_ptp_read(adapter, &now);
6027 /* If we program the controller's BASET registers with a time
6028 * in the future, it will hold all the packets until that
6029 * time, causing a lot of TX Hangs, so to avoid that, we
6030 * reject schedules that would start in the future.
6031 * Note: Limitation above is no longer in i226.
6033 if (!is_base_time_past(qopt->base_time, &now) &&
6034 igc_is_device_id_i225(hw))
6037 for (n = 0; n < qopt->num_entries; n++) {
6038 const struct tc_taprio_sched_entry *e, *prev;
6041 prev = n ? &qopt->entries[n - 1] : NULL;
6042 e = &qopt->entries[n];
6044 /* i225 only supports "global" frame preemption
6045 * settings.
6046 */
6047 if (e->command != TC_TAPRIO_CMD_SET_GATES)
6050 for (i = 0; i < adapter->num_tx_queues; i++)
6051 if (e->gate_mask & BIT(i)) {
6054 /* There are limitations: A single queue cannot
6055 * be opened and closed multiple times per cycle
6056 * unless the gate stays open. Check for it.
6058 if (queue_uses[i] > 1 &&
6059 !(prev->gate_mask & BIT(i)))
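/* Validity example (hypothetical gate lists): entries with gate masks 0x1
 * then 0x2 open queue 0 and queue 1 once each per cycle and pass this
 * check. Masks 0x1, 0x2, 0x1 reopen queue 0 after it closed, so
 * queue_uses[0] reaches 2 while the previous entry's mask did not keep the
 * gate open — the schedule is rejected.
 */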
6067 static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
6068 struct tc_etf_qopt_offload *qopt)
6070 struct igc_hw *hw = &adapter->hw;
6073 if (hw->mac.type != igc_i225)
6076 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
6080 return igc_tsn_offload_apply(adapter);
6083 static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
6087 adapter->base_time = 0;
6088 adapter->cycle_time = NSEC_PER_SEC;
6089 adapter->qbv_config_change_errors = 0;
6091 for (i = 0; i < adapter->num_tx_queues; i++) {
6092 struct igc_ring *ring = adapter->tx_ring[i];
6094 ring->start_time = 0;
6095 ring->end_time = NSEC_PER_SEC;
6102 static int igc_save_qbv_schedule(struct igc_adapter *adapter,
6103 struct tc_taprio_qopt_offload *qopt)
6105 bool queue_configured[IGC_MAX_TX_QUEUES] = { };
6106 struct igc_hw *hw = &adapter->hw;
6107 u32 start_time = 0, end_time = 0;
6111 adapter->qbv_enable = qopt->enable;
6114 return igc_tsn_clear_schedule(adapter);
6116 if (qopt->base_time < 0)
6119 if (igc_is_device_id_i225(hw) && adapter->base_time)
6122 if (!validate_schedule(adapter, qopt))
6125 adapter->cycle_time = qopt->cycle_time;
6126 adapter->base_time = qopt->base_time;
6128 for (n = 0; n < qopt->num_entries; n++) {
6129 struct tc_taprio_sched_entry *e = &qopt->entries[n];
6131 end_time += e->interval;
6133 /* If any of the conditions below are true, we need to manually
6134 * control the end time of the cycle.
6135 * 1. Qbv users can specify a cycle time that is not equal
6136 * to the total GCL intervals. Hence, recalculation is
6137 * necessary here to exclude the time interval that
6138 * exceeds the cycle time.
6139 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2,
6140 * once the end of the list is reached, it will switch
6141 * to the END_OF_CYCLE state and leave the gates in the
6142 * same state until the next cycle is started.
6144 if (end_time > adapter->cycle_time ||
6145 n + 1 == qopt->num_entries)
6146 end_time = adapter->cycle_time;
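/* Clamp example (hypothetical timings): with cycle_time = 1 ms and two
 * 600 us entries, the running end_time values are 600 us and 1200 us; the
 * second exceeds the cycle and is clamped to 1 ms here. The last entry is
 * always pinned to cycle_time, matching the END_OF_CYCLE behaviour
 * described above.
 */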
6148 for (i = 0; i < adapter->num_tx_queues; i++) {
6149 struct igc_ring *ring = adapter->tx_ring[i];
6151 if (!(e->gate_mask & BIT(i)))
6154 /* Check whether a queue stays open for more than one
6155 * entry. If so, keep the start and advance the end
6156 * time.
6157 */
6158 if (!queue_configured[i])
6159 ring->start_time = start_time;
6160 ring->end_time = end_time;
6162 queue_configured[i] = true;
6165 start_time += e->interval;
6168 /* Check whether a queue gets configured.
6169 * If not, set the start and end time to be end time.
6171 for (i = 0; i < adapter->num_tx_queues; i++) {
6172 if (!queue_configured[i]) {
6173 struct igc_ring *ring = adapter->tx_ring[i];
6175 ring->start_time = end_time;
6176 ring->end_time = end_time;
6180 for (i = 0; i < adapter->num_tx_queues; i++) {
6181 struct igc_ring *ring = adapter->tx_ring[i];
6182 struct net_device *dev = adapter->netdev;
6184 if (qopt->max_sdu[i])
6185 ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len;
6193 static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
6194 struct tc_taprio_qopt_offload *qopt)
6196 struct igc_hw *hw = &adapter->hw;
6199 if (hw->mac.type != igc_i225)
6202 err = igc_save_qbv_schedule(adapter, qopt);
6206 return igc_tsn_offload_apply(adapter);
6209 static int igc_save_cbs_params(struct igc_adapter *adapter, int queue,
6210 bool enable, int idleslope, int sendslope,
6211 int hicredit, int locredit)
6213 bool cbs_status[IGC_MAX_SR_QUEUES] = { false };
6214 struct net_device *netdev = adapter->netdev;
6215 struct igc_ring *ring;
6218 /* i225 has two sets of credit-based shaper logic.
6219 * Supporting it only on the top two priority queues
6221 if (queue < 0 || queue > 1)
6224 ring = adapter->tx_ring[queue];
6226 for (i = 0; i < IGC_MAX_SR_QUEUES; i++)
6227 if (adapter->tx_ring[i])
6228 cbs_status[i] = adapter->tx_ring[i]->cbs_enable;
6230 /* CBS should be enabled on the highest priority queue first in order
6231 * for the CBS algorithm to operate as intended.
6234 if (queue == 1 && !cbs_status[0]) {
6236 "Enabling CBS on queue1 before queue0\n");
6240 if (queue == 0 && cbs_status[1]) {
6242 "Disabling CBS on queue0 before queue1\n");
6247 ring->cbs_enable = enable;
6248 ring->idleslope = idleslope;
6249 ring->sendslope = sendslope;
6250 ring->hicredit = hicredit;
6251 ring->locredit = locredit;
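/* Parameter sketch (hypothetical numbers): to reserve 100 Mb/s for this
 * queue on a 1000 Mb/s link, the tc cbs qdisc would typically hand down
 * idleslope = 100000 (kbit/s) and sendslope = idleslope - port_rate =
 * -900000, with hicredit/locredit bounding credit accumulation. The values
 * are only recorded here; igc_tsn_offload_apply() programs the hardware.
 */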
6256 static int igc_tsn_enable_cbs(struct igc_adapter *adapter,
6257 struct tc_cbs_qopt_offload *qopt)
6259 struct igc_hw *hw = &adapter->hw;
6262 if (hw->mac.type != igc_i225)
6265 if (qopt->queue < 0 || qopt->queue > 1)
6268 err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable,
6269 qopt->idleslope, qopt->sendslope,
6270 qopt->hicredit, qopt->locredit);
6274 return igc_tsn_offload_apply(adapter);
6277 static int igc_tc_query_caps(struct igc_adapter *adapter,
6278 struct tc_query_caps_base *base)
6280 struct igc_hw *hw = &adapter->hw;
6282 switch (base->type) {
6283 case TC_SETUP_QDISC_TAPRIO: {
6284 struct tc_taprio_caps *caps = base->caps;
6286 caps->broken_mqprio = true;
6288 if (hw->mac.type == igc_i225) {
6289 caps->supports_queue_max_sdu = true;
6290 caps->gate_mask_per_txq = true;
6300 static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
6303 struct igc_adapter *adapter = netdev_priv(dev);
6305 switch (type) {
6306 case TC_QUERY_CAPS:
6307 return igc_tc_query_caps(adapter, type_data);
6308 case TC_SETUP_QDISC_TAPRIO:
6309 return igc_tsn_enable_qbv_scheduling(adapter, type_data);
6311 case TC_SETUP_QDISC_ETF:
6312 return igc_tsn_enable_launchtime(adapter, type_data);
6314 case TC_SETUP_QDISC_CBS:
6315 return igc_tsn_enable_cbs(adapter, type_data);
6322 static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6324 struct igc_adapter *adapter = netdev_priv(dev);
6326 switch (bpf->command) {
6327 case XDP_SETUP_PROG:
6328 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
6329 case XDP_SETUP_XSK_POOL:
6330 return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
6331 bpf->xsk.queue_id);
6337 static int igc_xdp_xmit(struct net_device *dev, int num_frames,
6338 struct xdp_frame **frames, u32 flags)
6340 struct igc_adapter *adapter = netdev_priv(dev);
6341 int cpu = smp_processor_id();
6342 struct netdev_queue *nq;
6343 struct igc_ring *ring;
6346 if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
6349 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6352 ring = igc_xdp_get_tx_ring(adapter, cpu);
6353 nq = txring_txq(ring);
6355 __netif_tx_lock(nq, cpu);
6358 for (i = 0; i < num_frames; i++) {
6360 struct xdp_frame *xdpf = frames[i];
6362 err = igc_xdp_init_tx_descriptor(ring, xdpf);
6364 xdp_return_frame_rx_napi(xdpf);
6369 if (flags & XDP_XMIT_FLUSH)
6370 igc_flush_tx_descriptors(ring);
6372 __netif_tx_unlock(nq);
6374 return num_frames - drops;
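/* Return-value note: frames that fail igc_xdp_init_tx_descriptor() are
 * freed immediately via xdp_return_frame_rx_napi() and counted as drops,
 * so a caller redirecting 8 frames that hits 2 descriptor failures sees a
 * return value of 6 (hypothetical counts) and only accounts the frames
 * that actually reached the ring.
 */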
6377 static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
6378 struct igc_q_vector *q_vector)
6380 struct igc_hw *hw = &adapter->hw;
6383 eics |= q_vector->eims_value;
6384 wr32(IGC_EICS, eics);
6387 int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
6389 struct igc_adapter *adapter = netdev_priv(dev);
6390 struct igc_q_vector *q_vector;
6391 struct igc_ring *ring;
6393 if (test_bit(__IGC_DOWN, &adapter->state))
6396 if (!igc_xdp_is_enabled(adapter))
6399 if (queue_id >= adapter->num_rx_queues)
6402 ring = adapter->rx_ring[queue_id];
6404 if (!ring->xsk_pool)
6407 q_vector = adapter->q_vector[queue_id];
6408 if (!napi_if_scheduled_mark_missed(&q_vector->napi))
6409 igc_trigger_rxtxq_interrupt(adapter, q_vector);
6414 static const struct net_device_ops igc_netdev_ops = {
6415 .ndo_open = igc_open,
6416 .ndo_stop = igc_close,
6417 .ndo_start_xmit = igc_xmit_frame,
6418 .ndo_set_rx_mode = igc_set_rx_mode,
6419 .ndo_set_mac_address = igc_set_mac,
6420 .ndo_change_mtu = igc_change_mtu,
6421 .ndo_tx_timeout = igc_tx_timeout,
6422 .ndo_get_stats64 = igc_get_stats64,
6423 .ndo_fix_features = igc_fix_features,
6424 .ndo_set_features = igc_set_features,
6425 .ndo_features_check = igc_features_check,
6426 .ndo_eth_ioctl = igc_ioctl,
6427 .ndo_setup_tc = igc_setup_tc,
6429 .ndo_xdp_xmit = igc_xdp_xmit,
6430 .ndo_xsk_wakeup = igc_xsk_wakeup,
6433 /* PCIe configuration access */
6434 void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
6436 struct igc_adapter *adapter = hw->back;
6438 pci_read_config_word(adapter->pdev, reg, value);
6441 void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
6443 struct igc_adapter *adapter = hw->back;
6445 pci_write_config_word(adapter->pdev, reg, *value);
6448 s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
6450 struct igc_adapter *adapter = hw->back;
6452 if (!pci_is_pcie(adapter->pdev))
6453 return -IGC_ERR_CONFIG;
6455 pcie_capability_read_word(adapter->pdev, reg, value);
6460 s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
6462 struct igc_adapter *adapter = hw->back;
6464 if (!pci_is_pcie(adapter->pdev))
6465 return -IGC_ERR_CONFIG;
6467 pcie_capability_write_word(adapter->pdev, reg, *value);
6472 u32 igc_rd32(struct igc_hw *hw, u32 reg)
6474 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
6475 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
6478 if (IGC_REMOVED(hw_addr))
6481 value = readl(&hw_addr[reg]);
6483 /* reads should not return all F's */
6484 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
6485 struct net_device *netdev = igc->netdev;
6488 netif_device_detach(netdev);
6489 netdev_err(netdev, "PCIe link lost, device now detached\n");
6490 WARN(pci_device_is_present(igc->pdev),
6491 "igc: Failed to read reg 0x%x!\n", reg);
6497 /* Mapping HW RSS Type to enum xdp_rss_hash_type */
6498 static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = {
6499 [IGC_RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_L2,
6500 [IGC_RSS_TYPE_HASH_TCP_IPV4] = XDP_RSS_TYPE_L4_IPV4_TCP,
6501 [IGC_RSS_TYPE_HASH_IPV4] = XDP_RSS_TYPE_L3_IPV4,
6502 [IGC_RSS_TYPE_HASH_TCP_IPV6] = XDP_RSS_TYPE_L4_IPV6_TCP,
6503 [IGC_RSS_TYPE_HASH_IPV6_EX] = XDP_RSS_TYPE_L3_IPV6_EX,
6504 [IGC_RSS_TYPE_HASH_IPV6] = XDP_RSS_TYPE_L3_IPV6,
6505 [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
6506 [IGC_RSS_TYPE_HASH_UDP_IPV4] = XDP_RSS_TYPE_L4_IPV4_UDP,
6507 [IGC_RSS_TYPE_HASH_UDP_IPV6] = XDP_RSS_TYPE_L4_IPV6_UDP,
6508 [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX,
6509 [10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */
6510 [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */
6511 [12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisions */
6512 [13] = XDP_RSS_TYPE_NONE,
6513 [14] = XDP_RSS_TYPE_NONE,
6514 [15] = XDP_RSS_TYPE_NONE,
6517 static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
6518 enum xdp_rss_hash_type *rss_type)
6520 const struct igc_xdp_buff *ctx = (void *)_ctx;
6522 if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH))
6525 *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss);
6526 *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)];
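/* Consumer-side sketch (hypothetical XDP program, assumes a kernel with
 * XDP metadata kfuncs): a BPF program retrieves this hash through the
 * rx_hash kfunc rather than reading descriptors directly:
 *
 *	enum xdp_rss_hash_type t;
 *	__u32 hash;
 *	if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &t))
 *		// hash is valid; t distinguishes e.g. L4_IPV4_TCP from L3_IPV4
 */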
6531 static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
6533 const struct igc_xdp_buff *ctx = (void *)_ctx;
6535 if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) {
6536 *timestamp = ctx->rx_ts;
6544 static const struct xdp_metadata_ops igc_xdp_metadata_ops = {
6545 .xmo_rx_hash = igc_xdp_rx_hash,
6546 .xmo_rx_timestamp = igc_xdp_rx_timestamp,
6550 * igc_probe - Device Initialization Routine
6551 * @pdev: PCI device information struct
6552 * @ent: entry in igc_pci_tbl
6554 * Returns 0 on success, negative on failure
6556 * igc_probe initializes an adapter identified by a pci_dev structure.
6557 * The OS initialization, configuring the adapter private structure,
6558 * and a hardware reset occur.
6560 static int igc_probe(struct pci_dev *pdev,
6561 const struct pci_device_id *ent)
6563 struct igc_adapter *adapter;
6564 struct net_device *netdev;
6566 const struct igc_info *ei = igc_info_tbl[ent->driver_data];
6569 err = pci_enable_device_mem(pdev);
6573 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6576 "No usable DMA configuration, aborting\n");
6580 err = pci_request_mem_regions(pdev, igc_driver_name);
6584 err = pci_enable_ptm(pdev, NULL);
6586 dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n");
6588 pci_set_master(pdev);
6591 netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
6595 goto err_alloc_etherdev;
6597 SET_NETDEV_DEV(netdev, &pdev->dev);
6599 pci_set_drvdata(pdev, netdev);
6600 adapter = netdev_priv(netdev);
6601 adapter->netdev = netdev;
6602 adapter->pdev = pdev;
6605 adapter->port_num = hw->bus.func;
6606 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
6608 err = pci_save_state(pdev);
6613 adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
6614 pci_resource_len(pdev, 0));
6615 if (!adapter->io_addr)
6618 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
6619 hw->hw_addr = adapter->io_addr;
6621 netdev->netdev_ops = &igc_netdev_ops;
6622 netdev->xdp_metadata_ops = &igc_xdp_metadata_ops;
6623 igc_ethtool_set_ops(netdev);
6624 netdev->watchdog_timeo = 5 * HZ;
6626 netdev->mem_start = pci_resource_start(pdev, 0);
6627 netdev->mem_end = pci_resource_end(pdev, 0);
6629 /* PCI config space info */
6630 hw->vendor_id = pdev->vendor;
6631 hw->device_id = pdev->device;
6632 hw->revision_id = pdev->revision;
6633 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6634 hw->subsystem_device_id = pdev->subsystem_device;
6636 /* Copy the default MAC and PHY function pointers */
6637 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
6638 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
6640 /* Initialize skew-specific constants */
6641 err = ei->get_invariants(hw);
6645 /* Add supported features to the features list*/
6646 netdev->features |= NETIF_F_SG;
6647 netdev->features |= NETIF_F_TSO;
6648 netdev->features |= NETIF_F_TSO6;
6649 netdev->features |= NETIF_F_TSO_ECN;
6650 netdev->features |= NETIF_F_RXHASH;
6651 netdev->features |= NETIF_F_RXCSUM;
6652 netdev->features |= NETIF_F_HW_CSUM;
6653 netdev->features |= NETIF_F_SCTP_CRC;
6654 netdev->features |= NETIF_F_HW_TC;
6656 #define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
6657 NETIF_F_GSO_GRE_CSUM | \
6658 NETIF_F_GSO_IPXIP4 | \
6659 NETIF_F_GSO_IPXIP6 | \
6660 NETIF_F_GSO_UDP_TUNNEL | \
6661 NETIF_F_GSO_UDP_TUNNEL_CSUM)
6663 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
6664 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
6666 /* setup the private structure */
6667 err = igc_sw_init(adapter);
6671 /* copy netdev features into list of user selectable features */
6672 netdev->hw_features |= NETIF_F_NTUPLE;
6673 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
6674 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
6675 netdev->hw_features |= netdev->features;
6677 netdev->features |= NETIF_F_HIGHDMA;
6679 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
6680 netdev->mpls_features |= NETIF_F_HW_CSUM;
6681 netdev->hw_enc_features |= netdev->vlan_features;
6683 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
6684 NETDEV_XDP_ACT_XSK_ZEROCOPY;
6686 /* MTU range: 68 - 9216 */
6687 netdev->min_mtu = ETH_MIN_MTU;
6688 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
6690 /* before reading the NVM, reset the controller to put the device in a
6691 * known good starting state
6693 hw->mac.ops.reset_hw(hw);
6695 if (igc_get_flash_presence_i225(hw)) {
6696 if (hw->nvm.ops.validate(hw) < 0) {
6697 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
6703 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
6704 /* copy the MAC address out of the NVM */
6705 if (hw->mac.ops.read_mac_addr(hw))
6706 dev_err(&pdev->dev, "NVM Read Error\n");
6709 eth_hw_addr_set(netdev, hw->mac.addr);
6711 if (!is_valid_ether_addr(netdev->dev_addr)) {
6712 dev_err(&pdev->dev, "Invalid MAC Address\n");
6717 /* configure RXPBSIZE and TXPBSIZE */
6718 wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
6719 wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
6721 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
6722 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
6724 INIT_WORK(&adapter->reset_task, igc_reset_task);
6725 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
6727 /* Initialize link properties that are user-changeable */
6728 adapter->fc_autoneg = true;
6729 hw->mac.autoneg = true;
6730 hw->phy.autoneg_advertised = 0xaf;
6732 hw->fc.requested_mode = igc_fc_default;
6733 hw->fc.current_mode = igc_fc_default;
6735 /* By default, support wake on port A */
6736 adapter->flags |= IGC_FLAG_WOL_SUPPORTED;
6738 /* initialize the wol settings based on the eeprom settings */
6739 if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
6740 adapter->wol |= IGC_WUFC_MAG;
6742 device_set_wakeup_enable(&adapter->pdev->dev,
6743 adapter->flags & IGC_FLAG_WOL_SUPPORTED);
6745 igc_ptp_init(adapter);
6747 igc_tsn_clear_schedule(adapter);
6749 /* reset the hardware with the new settings */
6750 igc_reset(adapter);
6752 /* let the f/w know that the h/w is now under the control of the
6753 * driver.
6754 */
6755 igc_get_hw_control(adapter);
6757 strncpy(netdev->name, "eth%d", IFNAMSIZ);
6758 err = register_netdev(netdev);
6762 /* carrier off reporting is important to ethtool even BEFORE open */
6763 netif_carrier_off(netdev);
6765 /* Check if Media Autosense is enabled */
6768 /* print pcie link status and MAC address */
6769 pcie_print_link_status(pdev);
6770 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
6772 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
6773 /* Disable EEE for internal PHY devices */
6774 hw->dev_spec._base.eee_enable = false;
6775 adapter->flags &= ~IGC_FLAG_EEE;
6776 igc_set_eee_i225(hw, false, false, false);
6778 pm_runtime_put_noidle(&pdev->dev);
6783 igc_release_hw_control(adapter);
6785 if (!igc_check_reset_block(hw))
6788 igc_clear_interrupt_scheme(adapter);
6789 iounmap(adapter->io_addr);
6791 free_netdev(netdev);
6793 pci_release_mem_regions(pdev);
6796 pci_disable_device(pdev);
6801 * igc_remove - Device Removal Routine
6802 * @pdev: PCI device information struct
6804 * igc_remove is called by the PCI subsystem to alert the driver
6805 * that it should release a PCI device. This could be caused by a
6806 * Hot-Plug event, or because the driver is going to be removed from
6807 * memory.
6808 */
6809 static void igc_remove(struct pci_dev *pdev)
6811 struct net_device *netdev = pci_get_drvdata(pdev);
6812 struct igc_adapter *adapter = netdev_priv(netdev);
6814 pm_runtime_get_noresume(&pdev->dev);
6816 igc_flush_nfc_rules(adapter);
6818 igc_ptp_stop(adapter);
6820 set_bit(__IGC_DOWN, &adapter->state);
6822 del_timer_sync(&adapter->watchdog_timer);
6823 del_timer_sync(&adapter->phy_info_timer);
6825 cancel_work_sync(&adapter->reset_task);
6826 cancel_work_sync(&adapter->watchdog_task);
6828 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6829 * would have already happened in close and is redundant.
6831 igc_release_hw_control(adapter);
6832 unregister_netdev(netdev);
6834 igc_clear_interrupt_scheme(adapter);
6835 pci_iounmap(pdev, adapter->io_addr);
6836 pci_release_mem_regions(pdev);
6838 free_netdev(netdev);
6840 pci_disable_device(pdev);
6843 static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
6846 struct net_device *netdev = pci_get_drvdata(pdev);
6847 struct igc_adapter *adapter = netdev_priv(netdev);
6848 u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
6849 struct igc_hw *hw = &adapter->hw;
6850 u32 ctrl, rctl, status;
6854 netif_device_detach(netdev);
6856 if (netif_running(netdev))
6857 __igc_close(netdev, true);
6859 igc_ptp_suspend(adapter);
6861 igc_clear_interrupt_scheme(adapter);
6864 status = rd32(IGC_STATUS);
6865 if (status & IGC_STATUS_LU)
6866 wufc &= ~IGC_WUFC_LNKC;
6869 igc_setup_rctl(adapter);
6870 igc_set_rx_mode(netdev);
6872 /* turn on all-multi mode if wake on multicast is enabled */
6873 if (wufc & IGC_WUFC_MC) {
6874 rctl = rd32(IGC_RCTL);
6875 rctl |= IGC_RCTL_MPE;
6876 wr32(IGC_RCTL, rctl);
6879 ctrl = rd32(IGC_CTRL);
6880 ctrl |= IGC_CTRL_ADVD3WUC;
6881 wr32(IGC_CTRL, ctrl);
6883 /* Allow time for pending master requests to run */
6884 igc_disable_pcie_master(hw);
6886 wr32(IGC_WUC, IGC_WUC_PME_EN);
6887 wr32(IGC_WUFC, wufc);
6893 wake = wufc || adapter->en_mng_pt;
6894 if (!wake)
6895 igc_power_down_phy_copper_base(&adapter->hw);
6896 else
6897 igc_power_up_link(adapter);
6900 *enable_wake = wake;
6902 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6903 * would have already happened in close and is redundant.
6905 igc_release_hw_control(adapter);
6907 pci_disable_device(pdev);
6913 static int __maybe_unused igc_runtime_suspend(struct device *dev)
6915 return __igc_shutdown(to_pci_dev(dev), NULL, 1);
static void igc_deliver_wake_packet(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	u32 wupl;

	wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;

	/* WUPM stores only the first 128 bytes of the wake packet.
	 * Read the packet only if we have the whole thing.
	 */
	if (wupl == 0 || wupl > IGC_WUPM_BYTES)
		return;

	skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
	if (!skb)
		return;

	skb_put(skb, wupl);

	/* Ensure reads are 32-bit aligned */
	wupl = roundup(wupl, 4);

	memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);

	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);
}
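/* Resume restores the PCI config space before touching any BARs, then
 * rebuilds the interrupt scheme and resets the hardware before taking
 * control back from firmware and reopening the interface.
 */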
static int __maybe_unused igc_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 err, val;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;
	err = pci_enable_device_mem(pdev);
	if (err) {
		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	val = rd32(IGC_WUS);
	if (val & WAKE_PKT_WUS)
		igc_deliver_wake_packet(netdev);

	wr32(IGC_WUS, ~0);

	rtnl_lock();
	if (!err && netif_running(netdev))
		err = __igc_open(netdev, true);

	if (!err)
		netif_device_attach(netdev);
	rtnl_unlock();

	return err;
}
static int __maybe_unused igc_runtime_resume(struct device *dev)
{
	return igc_resume(dev);
}
static int __maybe_unused igc_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 0);
}
static int __maybe_unused igc_runtime_idle(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (!igc_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}
#endif /* CONFIG_PM */
static void igc_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igc_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
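/* The three callbacks below implement the standard PCI error recovery
 * sequence: error_detected quiesces the device, slot_reset reinitializes
 * it after the bus has been reset, and resume restarts traffic once the
 * PCI core reports that recovery succeeded.
 */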
/**
 * igc_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igc_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igc_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the igc_resume routine.
 */
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	pci_ers_result_t result;

	if (pci_enable_device_mem(pdev)) {
		netdev_err(netdev, "Could not re-enable PCI device after reset\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		/* In case of PCI error, adapter loses its HW address
		 * so we should re-assign it here.
		 */
		hw->hw_addr = adapter->io_addr;

		igc_reset(adapter);
		wr32(IGC_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}
/**
 * igc_io_resume - called when traffic can start to flow again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the igc_resume routine.
 */
static void igc_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	if (netif_running(netdev)) {
		if (igc_open(netdev)) {
			netdev_err(netdev, "igc_open failed after reset\n");
			rtnl_unlock();
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);
	rtnl_unlock();
}
static const struct pci_error_handlers igc_err_handler = {
	.error_detected = igc_io_error_detected,
	.slot_reset = igc_io_slot_reset,
	.resume = igc_io_resume,
};
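/* SET_SYSTEM_SLEEP_PM_OPS wires igc_suspend/igc_resume into the system
 * sleep transitions, while SET_RUNTIME_PM_OPS hooks up the runtime
 * suspend, resume and idle callbacks; both macros expand to nothing when
 * the kernel is built without the corresponding PM support.
 */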
static const struct dev_pm_ops igc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
	SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
			   igc_runtime_idle)
};
static struct pci_driver igc_driver = {
	.name     = igc_driver_name,
	.id_table = igc_pci_tbl,
	.probe    = igc_probe,
	.remove   = igc_remove,
#ifdef CONFIG_PM
	.driver.pm = &igc_pm_ops,
#endif
	.shutdown = igc_shutdown,
	.err_handler = &igc_err_handler,
};
/**
 * igc_reinit_queues - reinitialize the queues after a configuration change
 * @adapter: pointer to adapter structure
 *
 * Returns 0 on success, negative on failure.
 */
int igc_reinit_queues(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;

	if (netif_running(netdev))
		igc_close(netdev);

	igc_reset_interrupt_capability(adapter);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		err = igc_open(netdev);

	return err;
}
/**
 * igc_get_hw_dev - return device
 * @hw: pointer to hardware structure
 *
 * Used by the hardware layer to print debugging information.
 */
struct net_device *igc_get_hw_dev(struct igc_hw *hw)
{
	struct igc_adapter *adapter = hw->back;

	return adapter->netdev;
}
static void igc_disable_rx_ring_hw(struct igc_ring *ring)
{
	struct igc_hw *hw = &ring->q_vector->adapter->hw;
	u8 idx = ring->reg_idx;
	u32 rxdctl;

	rxdctl = rd32(IGC_RXDCTL(idx));
	rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
	rxdctl |= IGC_RXDCTL_SWFLUSH;
	wr32(IGC_RXDCTL(idx), rxdctl);
}
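/* A ring is always stopped in hardware first (queue disabled, pending
 * descriptors flushed via SWFLUSH) and only then cleaned in software, so
 * the DMA engine can no longer touch the buffers being freed.
 */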
void igc_disable_rx_ring(struct igc_ring *ring)
{
	igc_disable_rx_ring_hw(ring);
	igc_clean_rx_ring(ring);
}
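/* When an AF_XDP buffer pool is bound to the ring, receive buffers come
 * from the zero-copy pool; otherwise the regular page-based allocator is
 * used.
 */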
void igc_enable_rx_ring(struct igc_ring *ring)
{
	struct igc_adapter *adapter = ring->q_vector->adapter;

	igc_configure_rx_ring(adapter, ring);

	if (ring->xsk_pool)
		igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
	else
		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
}
static void igc_disable_tx_ring_hw(struct igc_ring *ring)
{
	struct igc_hw *hw = &ring->q_vector->adapter->hw;
	u8 idx = ring->reg_idx;
	u32 txdctl;

	txdctl = rd32(IGC_TXDCTL(idx));
	txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
	txdctl |= IGC_TXDCTL_SWFLUSH;
	wr32(IGC_TXDCTL(idx), txdctl);
}
void igc_disable_tx_ring(struct igc_ring *ring)
{
	igc_disable_tx_ring_hw(ring);
	igc_clean_tx_ring(ring);
}
void igc_enable_tx_ring(struct igc_ring *ring)
{
	struct igc_adapter *adapter = ring->q_vector->adapter;

	igc_configure_tx_ring(adapter, ring);
}
/**
 * igc_init_module - Driver Registration Routine
 *
 * igc_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init igc_init_module(void)
{
	int ret;

	pr_info("%s\n", igc_driver_string);
	pr_info("%s\n", igc_copyright);

	ret = pci_register_driver(&igc_driver);

	return ret;
}

module_init(igc_init_module);
/**
 * igc_exit_module - Driver Exit Cleanup Routine
 *
 * igc_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit igc_exit_module(void)
{
	pci_unregister_driver(&igc_driver);
}

module_exit(igc_exit_module);