1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 #ifdef CONFIG_RFS_ACCEL
9 #include <linux/cpu_rmap.h>
10 #endif /* CONFIG_RFS_ACCEL */
11 #include <linux/ethtool.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/numa.h>
15 #include <linux/pci.h>
16 #include <linux/utsname.h>
17 #include <linux/version.h>
18 #include <linux/vmalloc.h>
21 #include "ena_netdev.h"
22 #include "ena_pci_id_tbl.h"
25 MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
26 MODULE_DESCRIPTION(DEVICE_NAME);
27 MODULE_LICENSE("GPL");
29 /* Time in jiffies before concluding the transmitter is hung. */
30 #define TX_TIMEOUT (5 * HZ)
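/* Note: HZ is the number of scheduler ticks per second, so 5 * HZ always
 * corresponds to roughly five seconds regardless of the configured tick rate.
 */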
32 #define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())
34 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
35 NETIF_MSG_IFDOWN | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
37 static struct ena_aenq_handlers aenq_handlers;
39 static struct workqueue_struct *ena_wq;
41 MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
43 static int ena_rss_init_default(struct ena_adapter *adapter);
44 static void check_for_admin_com_state(struct ena_adapter *adapter);
45 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
46 static int ena_restore_device(struct ena_adapter *adapter);
48 static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
50 enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
51 struct ena_adapter *adapter = netdev_priv(dev);
52 unsigned int time_since_last_napi, threshold;
53 struct ena_ring *tx_ring;
56 if (txqueue >= adapter->num_io_queues) {
57 netdev_err(dev, "TX timeout on invalid queue %u\n", txqueue);
61 threshold = jiffies_to_usecs(dev->watchdog_timeo);
62 tx_ring = &adapter->tx_ring[txqueue];
64 time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
65 napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED);
68 "TX q %d is paused for too long (threshold %u). Time since last napi %u usec. napi scheduled: %d\n",
74 if (threshold < time_since_last_napi && napi_scheduled) {
76 "napi handler hasn't been called for a long time but is scheduled\n");
77 reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
80 /* Change the state of the device to trigger reset.
81 * Check that we are not already in the middle of a reset or that one has already been triggered
83 if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
86 ena_reset_device(adapter, reset_reason);
87 ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
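/* Summary note: the watchdog handler above only marks the adapter for reset
 * (recording the reason) and bumps the tx_timeout stat; the actual teardown
 * and restore are expected to happen later in the driver's reset path, not
 * in this watchdog context.
 */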
90 static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
94 for (i = 0; i < adapter->num_io_queues; i++)
95 adapter->rx_ring[i].mtu = mtu;
98 static int ena_change_mtu(struct net_device *dev, int new_mtu)
100 struct ena_adapter *adapter = netdev_priv(dev);
103 ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
105 netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
106 update_rx_ring_mtu(adapter, new_mtu);
109 netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
116 int ena_xmit_common(struct ena_adapter *adapter,
117 struct ena_ring *ring,
118 struct ena_tx_buffer *tx_info,
119 struct ena_com_tx_ctx *ena_tx_ctx,
125 if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
127 netif_dbg(adapter, tx_queued, adapter->netdev,
128 "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
130 ena_ring_tx_doorbell(ring);
133 /* prepare the packet's descriptors to dma engine */
134 rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
137 /* In case there isn't enough space in the queue for the packet,
138 * we simply drop it. All other failure reasons of
139 * ena_com_prepare_tx() are fatal and therefore require a device reset.
142 netif_err(adapter, tx_queued, adapter->netdev,
143 "Failed to prepare tx bufs\n");
144 ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp);
146 ena_reset_device(adapter, ENA_REGS_RESET_DRIVER_INVALID_STATE);
150 u64_stats_update_begin(&ring->syncp);
151 ring->tx_stats.cnt++;
152 ring->tx_stats.bytes += bytes;
153 u64_stats_update_end(&ring->syncp);
155 tx_info->tx_descs = nb_hw_desc;
156 tx_info->total_tx_size = bytes;
157 tx_info->last_jiffies = jiffies;
158 tx_info->print_once = 0;
160 ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
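/* Sketch (assumed definition, not shown in this file): with power-of-two ring
 * sizes the wrap-around above can be a simple mask, e.g.
 *   #define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
 */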
165 static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
167 #ifdef CONFIG_RFS_ACCEL
171 adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
172 if (!adapter->netdev->rx_cpu_rmap)
174 for (i = 0; i < adapter->num_io_queues; i++) {
175 int irq_idx = ENA_IO_IRQ_IDX(i);
177 rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
178 pci_irq_vector(adapter->pdev, irq_idx));
180 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
181 adapter->netdev->rx_cpu_rmap = NULL;
185 #endif /* CONFIG_RFS_ACCEL */
189 static void ena_init_io_rings_common(struct ena_adapter *adapter,
190 struct ena_ring *ring, u16 qid)
193 ring->pdev = adapter->pdev;
194 ring->dev = &adapter->pdev->dev;
195 ring->netdev = adapter->netdev;
196 ring->napi = &adapter->ena_napi[qid].napi;
197 ring->adapter = adapter;
198 ring->ena_dev = adapter->ena_dev;
199 ring->per_napi_packets = 0;
202 ring->no_interrupt_event_cnt = 0;
203 u64_stats_init(&ring->syncp);
206 void ena_init_io_rings(struct ena_adapter *adapter,
207 int first_index, int count)
209 struct ena_com_dev *ena_dev;
210 struct ena_ring *txr, *rxr;
213 ena_dev = adapter->ena_dev;
215 for (i = first_index; i < first_index + count; i++) {
216 txr = &adapter->tx_ring[i];
217 rxr = &adapter->rx_ring[i];
219 /* TX common ring state */
220 ena_init_io_rings_common(adapter, txr, i);
222 /* TX specific ring state */
223 txr->ring_size = adapter->requested_tx_ring_size;
224 txr->tx_max_header_size = ena_dev->tx_max_header_size;
225 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
226 txr->sgl_size = adapter->max_tx_sgl_size;
227 txr->smoothed_interval =
228 ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
229 txr->disable_meta_caching = adapter->disable_meta_caching;
230 spin_lock_init(&txr->xdp_tx_lock);
232 /* Don't init RX queues for xdp queues */
233 if (!ENA_IS_XDP_INDEX(adapter, i)) {
234 /* RX common ring state */
235 ena_init_io_rings_common(adapter, rxr, i);
237 /* RX specific ring state */
238 rxr->ring_size = adapter->requested_rx_ring_size;
239 rxr->rx_copybreak = adapter->rx_copybreak;
240 rxr->sgl_size = adapter->max_rx_sgl_size;
241 rxr->smoothed_interval =
242 ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
243 rxr->empty_rx_queue = 0;
244 rxr->rx_headroom = NET_SKB_PAD;
245 adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
246 rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues];
251 /* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
252 * @adapter: network interface device structure
255 * Return 0 on success, negative on failure
257 static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
259 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
260 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
263 if (tx_ring->tx_buffer_info) {
264 netif_err(adapter, ifup,
265 adapter->netdev, "tx_buffer_info is not NULL");
269 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
270 node = cpu_to_node(ena_irq->cpu);
272 tx_ring->tx_buffer_info = vzalloc_node(size, node);
273 if (!tx_ring->tx_buffer_info) {
274 tx_ring->tx_buffer_info = vzalloc(size);
275 if (!tx_ring->tx_buffer_info)
276 goto err_tx_buffer_info;
279 size = sizeof(u16) * tx_ring->ring_size;
280 tx_ring->free_ids = vzalloc_node(size, node);
281 if (!tx_ring->free_ids) {
282 tx_ring->free_ids = vzalloc(size);
283 if (!tx_ring->free_ids)
284 goto err_tx_free_ids;
287 size = tx_ring->tx_max_header_size;
288 tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
289 if (!tx_ring->push_buf_intermediate_buf) {
290 tx_ring->push_buf_intermediate_buf = vzalloc(size);
291 if (!tx_ring->push_buf_intermediate_buf)
292 goto err_push_buf_intermediate_buf;
295 /* Req id ring for TX out of order completions */
296 for (i = 0; i < tx_ring->ring_size; i++)
297 tx_ring->free_ids[i] = i;
299 /* Reset tx statistics */
300 memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
302 tx_ring->next_to_use = 0;
303 tx_ring->next_to_clean = 0;
304 tx_ring->cpu = ena_irq->cpu;
305 tx_ring->numa_node = node;
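/* Allocation strategy used above: prefer memory on the NUMA node of the
 * queue's IRQ via vzalloc_node(), and fall back to a plain vzalloc() on any
 * node if the local node is short on memory.
 */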
308 err_push_buf_intermediate_buf:
309 vfree(tx_ring->free_ids);
310 tx_ring->free_ids = NULL;
312 vfree(tx_ring->tx_buffer_info);
313 tx_ring->tx_buffer_info = NULL;
318 /* ena_free_tx_resources - Free I/O Tx Resources per Queue
319 * @adapter: network interface device structure
322 * Free all transmit software resources
324 static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
326 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
328 vfree(tx_ring->tx_buffer_info);
329 tx_ring->tx_buffer_info = NULL;
331 vfree(tx_ring->free_ids);
332 tx_ring->free_ids = NULL;
334 vfree(tx_ring->push_buf_intermediate_buf);
335 tx_ring->push_buf_intermediate_buf = NULL;
338 int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
339 int first_index, int count)
343 for (i = first_index; i < first_index + count; i++) {
344 rc = ena_setup_tx_resources(adapter, i);
353 netif_err(adapter, ifup, adapter->netdev,
354 "Tx queue %d: allocation failed\n", i);
356 /* rewind the index freeing the rings as we go */
357 while (first_index < i--)
358 ena_free_tx_resources(adapter, i);
362 void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
363 int first_index, int count)
367 for (i = first_index; i < first_index + count; i++)
368 ena_free_tx_resources(adapter, i);
371 /* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
372 * @adapter: board private structure
374 * Free all transmit software resources
376 void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
378 ena_free_all_io_tx_resources_in_range(adapter,
380 adapter->xdp_num_queues +
381 adapter->num_io_queues);
384 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
385 * @adapter: network interface device structure
388 * Returns 0 on success, negative on failure
390 static int ena_setup_rx_resources(struct ena_adapter *adapter,
393 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
394 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
397 if (rx_ring->rx_buffer_info) {
398 netif_err(adapter, ifup, adapter->netdev,
399 "rx_buffer_info is not NULL");
403 /* alloc an extra element so that in the rx path
404 * we can always prefetch rx_info + 1
406 size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
407 node = cpu_to_node(ena_irq->cpu);
409 rx_ring->rx_buffer_info = vzalloc_node(size, node);
410 if (!rx_ring->rx_buffer_info) {
411 rx_ring->rx_buffer_info = vzalloc(size);
412 if (!rx_ring->rx_buffer_info)
416 size = sizeof(u16) * rx_ring->ring_size;
417 rx_ring->free_ids = vzalloc_node(size, node);
418 if (!rx_ring->free_ids) {
419 rx_ring->free_ids = vzalloc(size);
420 if (!rx_ring->free_ids) {
421 vfree(rx_ring->rx_buffer_info);
422 rx_ring->rx_buffer_info = NULL;
427 /* Req id ring for receiving RX pkts out of order */
428 for (i = 0; i < rx_ring->ring_size; i++)
429 rx_ring->free_ids[i] = i;
431 /* Reset rx statistics */
432 memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
434 rx_ring->next_to_clean = 0;
435 rx_ring->next_to_use = 0;
436 rx_ring->cpu = ena_irq->cpu;
437 rx_ring->numa_node = node;
442 /* ena_free_rx_resources - Free I/O Rx Resources
443 * @adapter: network interface device structure
446 * Free all receive software resources
448 static void ena_free_rx_resources(struct ena_adapter *adapter,
451 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
453 vfree(rx_ring->rx_buffer_info);
454 rx_ring->rx_buffer_info = NULL;
456 vfree(rx_ring->free_ids);
457 rx_ring->free_ids = NULL;
460 /* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
461 * @adapter: board private structure
463 * Return 0 on success, negative on failure
465 static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
469 for (i = 0; i < adapter->num_io_queues; i++) {
470 rc = ena_setup_rx_resources(adapter, i);
479 netif_err(adapter, ifup, adapter->netdev,
480 "Rx queue %d: allocation failed\n", i);
482 /* rewind the index freeing the rings as we go */
484 ena_free_rx_resources(adapter, i);
488 /* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
489 * @adapter: board private structure
491 * Free all receive software resources
493 static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
497 for (i = 0; i < adapter->num_io_queues; i++)
498 ena_free_rx_resources(adapter, i);
501 static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
506 /* This would allocate the page on the same NUMA node the executing code is running on */
509 page = dev_alloc_page();
511 ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp);
512 return ERR_PTR(-ENOSPC);
515 /* To enable NIC-side port-mirroring, AKA SPAN port,
516 * we make the buffer readable from the nic as well
518 *dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
520 if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) {
521 ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
524 return ERR_PTR(-EIO);
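/* ERR_PTR() encodes a small negative errno into the returned pointer, so a
 * single return value can carry either a valid page or an error; the caller
 * decodes it with IS_ERR()/PTR_ERR().
 */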
530 static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
531 struct ena_rx_buffer *rx_info)
533 int headroom = rx_ring->rx_headroom;
534 struct ena_com_buf *ena_buf;
539 /* restore page offset value in case it has been changed by device */
540 rx_info->buf_offset = headroom;
542 /* if the previously allocated page hasn't been used yet, keep it */
543 if (unlikely(rx_info->page))
546 /* We handle DMA here */
547 page = ena_alloc_map_page(rx_ring, &dma);
549 return PTR_ERR(page);
551 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
552 "Allocate page %p, rx_info %p\n", page, rx_info);
554 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
556 rx_info->page = page;
557 rx_info->dma_addr = dma;
558 rx_info->page_offset = 0;
559 ena_buf = &rx_info->ena_buf;
560 ena_buf->paddr = dma + headroom;
561 ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom;
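/* Resulting in-page layout handed to the device (sketch):
 *   | rx_headroom | frame data (up to ena_buf->len bytes) | skb_shared_info tailroom |
 * paddr points past the headroom; the tailroom is reserved so the buffer can
 * later be wrapped into an skb without an extra copy.
 */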
566 static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
567 struct ena_rx_buffer *rx_info,
570 dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL,
574 static void ena_free_rx_page(struct ena_ring *rx_ring,
575 struct ena_rx_buffer *rx_info)
577 struct page *page = rx_info->page;
579 if (unlikely(!page)) {
580 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
581 "Trying to free unallocated buffer\n");
585 ena_unmap_rx_buff_attrs(rx_ring, rx_info, 0);
588 rx_info->page = NULL;
591 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
593 u16 next_to_use, req_id;
597 next_to_use = rx_ring->next_to_use;
599 for (i = 0; i < num; i++) {
600 struct ena_rx_buffer *rx_info;
602 req_id = rx_ring->free_ids[next_to_use];
604 rx_info = &rx_ring->rx_buffer_info[req_id];
606 rc = ena_alloc_rx_buffer(rx_ring, rx_info);
607 if (unlikely(rc < 0)) {
608 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
609 "Failed to allocate buffer for rx queue %d\n",
613 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
617 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
618 "Failed to add buffer for rx queue %d\n",
622 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
626 if (unlikely(i < num)) {
627 ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1,
629 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
630 "Refilled rx qid %d with only %d buffers (from %d)\n",
631 rx_ring->qid, i, num);
634 /* ena_com_write_sq_doorbell issues a wmb() */
636 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
638 rx_ring->next_to_use = next_to_use;
643 static void ena_free_rx_bufs(struct ena_adapter *adapter,
646 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
649 for (i = 0; i < rx_ring->ring_size; i++) {
650 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
653 ena_free_rx_page(rx_ring, rx_info);
657 /* ena_refill_all_rx_bufs - allocate all queues Rx buffers
658 * @adapter: board private structure
660 static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
662 struct ena_ring *rx_ring;
665 for (i = 0; i < adapter->num_io_queues; i++) {
666 rx_ring = &adapter->rx_ring[i];
667 bufs_num = rx_ring->ring_size - 1;
668 rc = ena_refill_rx_bufs(rx_ring, bufs_num);
670 if (unlikely(rc != bufs_num))
671 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
672 "Refilling Queue %d failed. allocated %d buffers from: %d\n",
677 static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
681 for (i = 0; i < adapter->num_io_queues; i++)
682 ena_free_rx_bufs(adapter, i);
685 void ena_unmap_tx_buff(struct ena_ring *tx_ring,
686 struct ena_tx_buffer *tx_info)
688 struct ena_com_buf *ena_buf;
692 ena_buf = tx_info->bufs;
693 cnt = tx_info->num_of_bufs;
698 if (tx_info->map_linear_data) {
699 dma_unmap_single(tx_ring->dev,
700 dma_unmap_addr(ena_buf, paddr),
701 dma_unmap_len(ena_buf, len),
707 /* unmap remaining mapped pages */
708 for (i = 0; i < cnt; i++) {
709 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
710 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
715 /* ena_free_tx_bufs - Free Tx Buffers per Queue
716 * @tx_ring: TX ring whose buffers are to be freed
718 static void ena_free_tx_bufs(struct ena_ring *tx_ring)
720 bool print_once = true;
724 is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);
726 for (i = 0; i < tx_ring->ring_size; i++) {
727 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
733 netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev,
734 "Free uncompleted tx skb qid %d idx 0x%x\n",
738 netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev,
739 "Free uncompleted tx skb qid %d idx 0x%x\n",
743 ena_unmap_tx_buff(tx_ring, tx_info);
746 xdp_return_frame(tx_info->xdpf);
748 dev_kfree_skb_any(tx_info->skb);
752 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
756 static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
758 struct ena_ring *tx_ring;
761 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
762 tx_ring = &adapter->tx_ring[i];
763 ena_free_tx_bufs(tx_ring);
767 static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
772 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
773 ena_qid = ENA_IO_TXQ_IDX(i);
774 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
778 static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
783 for (i = 0; i < adapter->num_io_queues; i++) {
784 ena_qid = ENA_IO_RXQ_IDX(i);
785 cancel_work_sync(&adapter->ena_napi[i].dim.work);
786 ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]);
787 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
791 static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
793 ena_destroy_all_tx_queues(adapter);
794 ena_destroy_all_rx_queues(adapter);
797 int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
798 struct ena_tx_buffer *tx_info, bool is_xdp)
801 netif_err(ring->adapter,
804 "tx_info doesn't have valid %s. qid %u req_id %u",
805 is_xdp ? "xdp frame" : "skb", ring->qid, req_id);
807 netif_err(ring->adapter,
810 "Invalid req_id %u in qid %u\n",
813 ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
814 ena_reset_device(ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
819 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
821 struct ena_tx_buffer *tx_info;
823 tx_info = &tx_ring->tx_buffer_info[req_id];
824 if (likely(tx_info->skb))
827 return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
830 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
832 struct netdev_queue *txq;
841 next_to_clean = tx_ring->next_to_clean;
842 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);
844 while (tx_pkts < budget) {
845 struct ena_tx_buffer *tx_info;
848 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
851 if (unlikely(rc == -EINVAL))
852 handle_invalid_req_id(tx_ring, req_id, NULL, false);
856 /* validate that the request id points to a valid skb */
857 rc = validate_tx_req_id(tx_ring, req_id);
861 tx_info = &tx_ring->tx_buffer_info[req_id];
864 /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
868 tx_info->last_jiffies = 0;
870 ena_unmap_tx_buff(tx_ring, tx_info);
872 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
873 "tx_poll: q %d skb %p completed\n", tx_ring->qid,
876 tx_bytes += tx_info->total_tx_size;
879 total_done += tx_info->tx_descs;
881 tx_ring->free_ids[next_to_clean] = req_id;
882 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
886 tx_ring->next_to_clean = next_to_clean;
887 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
889 netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
891 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
892 "tx_poll: q %d done. total pkts: %d\n",
893 tx_ring->qid, tx_pkts);
895 /* need to make the ring's circular update visible to
896 * ena_start_xmit() before checking for netif_queue_stopped().
900 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
901 ENA_TX_WAKEUP_THRESH);
902 if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
903 __netif_tx_lock(txq, smp_processor_id());
905 ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
906 ENA_TX_WAKEUP_THRESH);
907 if (netif_tx_queue_stopped(txq) && above_thresh &&
908 test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
909 netif_tx_wake_queue(txq);
910 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
913 __netif_tx_unlock(txq);
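/* The stopped/space check is repeated under __netif_tx_lock() to close the
 * race with ena_start_xmit() stopping the queue on another CPU; otherwise a
 * wakeup could be lost and the queue would stay stopped.
 */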
919 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag, u16 len)
924 skb = napi_alloc_skb(rx_ring->napi, len);
926 skb = napi_build_skb(first_frag, len);
928 if (unlikely(!skb)) {
929 ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
932 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
933 "Failed to allocate skb. first_frag %s\n",
934 first_frag ? "provided" : "not provided");
940 static bool ena_try_rx_buf_page_reuse(struct ena_rx_buffer *rx_info, u16 buf_len,
941 u16 len, int pkt_offset)
943 struct ena_com_buf *ena_buf = &rx_info->ena_buf;
945 /* More than ENA_MIN_RX_BUF_SIZE left in the reused buffer
946 * for data + headroom + tailroom.
948 if (SKB_DATA_ALIGN(len + pkt_offset) + ENA_MIN_RX_BUF_SIZE <= ena_buf->len) {
949 page_ref_inc(rx_info->page);
950 rx_info->page_offset += buf_len;
951 ena_buf->paddr += buf_len;
952 ena_buf->len -= buf_len;
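/* Reuse accounting (sketch): page_ref_inc() keeps the page alive while both
 * the skb frag and the ring still reference it, and paddr/len/page_offset are
 * advanced so the next packet lands in the unused remainder of the same page.
 */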
959 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
960 struct ena_com_rx_buf_info *ena_bufs,
964 int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
965 bool is_xdp_loaded = ena_xdp_present_ring(rx_ring);
966 struct ena_rx_buffer *rx_info;
967 struct ena_adapter *adapter;
968 int page_offset, pkt_offset;
969 dma_addr_t pre_reuse_paddr;
970 u16 len, req_id, buf = 0;
971 bool reuse_rx_buf_page;
977 len = ena_bufs[buf].len;
978 req_id = ena_bufs[buf].req_id;
980 rx_info = &rx_ring->rx_buffer_info[req_id];
982 if (unlikely(!rx_info->page)) {
983 adapter = rx_ring->adapter;
984 netif_err(adapter, rx_err, rx_ring->netdev,
985 "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
986 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
987 ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
991 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
992 "rx_info %p page %p\n",
993 rx_info, rx_info->page);
995 buf_offset = rx_info->buf_offset;
996 pkt_offset = buf_offset - rx_ring->rx_headroom;
997 page_offset = rx_info->page_offset;
998 buf_addr = page_address(rx_info->page) + page_offset;
1000 if (len <= rx_ring->rx_copybreak) {
1001 skb = ena_alloc_skb(rx_ring, NULL, len);
1005 skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
1006 dma_sync_single_for_device(rx_ring->dev,
1007 dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
1012 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1013 "RX allocated small packet. len %d.\n", skb->len);
1014 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1015 rx_ring->free_ids[*next_to_clean] = req_id;
1016 *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
1017 rx_ring->ring_size);
1021 buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
1023 /* If XDP isn't loaded try to reuse part of the RX buffer */
1024 reuse_rx_buf_page = !is_xdp_loaded &&
1025 ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
1027 if (!reuse_rx_buf_page)
1028 ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
1030 skb = ena_alloc_skb(rx_ring, buf_addr, buf_len);
1034 /* Populate skb's linear part */
1035 skb_reserve(skb, buf_offset);
1037 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1040 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1041 "RX skb updated. len %d. data_len %d\n",
1042 skb->len, skb->data_len);
1044 if (!reuse_rx_buf_page)
1045 rx_info->page = NULL;
1047 rx_ring->free_ids[*next_to_clean] = req_id;
1049 ENA_RX_RING_IDX_NEXT(*next_to_clean,
1050 rx_ring->ring_size);
1051 if (likely(--descs == 0))
1055 len = ena_bufs[buf].len;
1056 req_id = ena_bufs[buf].req_id;
1058 rx_info = &rx_ring->rx_buffer_info[req_id];
1060 /* rx_info->buf_offset includes rx_ring->rx_headroom */
1061 buf_offset = rx_info->buf_offset;
1062 pkt_offset = buf_offset - rx_ring->rx_headroom;
1063 buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
1064 page_offset = rx_info->page_offset;
1066 pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
1068 reuse_rx_buf_page = !is_xdp_loaded &&
1069 ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
1071 dma_sync_single_for_cpu(rx_ring->dev,
1072 pre_reuse_paddr + pkt_offset,
1076 if (!reuse_rx_buf_page)
1077 ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
1079 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
1080 page_offset + buf_offset, len, buf_len);
1087 /* ena_rx_checksum - indicate in skb if hw indicated a good cksum
1088 * @rx_ring: the RX ring the packet arrived on
1089 * @ena_rx_ctx: received packet context/metadata
1090 * @skb: skb currently being received and modified
1092 static void ena_rx_checksum(struct ena_ring *rx_ring,
1093 struct ena_com_rx_ctx *ena_rx_ctx,
1094 struct sk_buff *skb)
1096 /* Rx csum disabled */
1097 if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
1098 skb->ip_summed = CHECKSUM_NONE;
1102 /* For fragmented packets the checksum isn't valid */
1103 if (ena_rx_ctx->frag) {
1104 skb->ip_summed = CHECKSUM_NONE;
1108 /* if the packet is IPv4 and its header checksum is bad */
1109 if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
1110 (ena_rx_ctx->l3_csum_err))) {
1111 /* ipv4 checksum error */
1112 skb->ip_summed = CHECKSUM_NONE;
1113 ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
1115 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
1116 "RX IPv4 header checksum error\n");
1121 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1122 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
1123 if (unlikely(ena_rx_ctx->l4_csum_err)) {
1124 /* TCP/UDP checksum error */
1125 ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
1127 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
1128 "RX L4 checksum error\n");
1129 skb->ip_summed = CHECKSUM_NONE;
1133 if (likely(ena_rx_ctx->l4_csum_checked)) {
1134 skb->ip_summed = CHECKSUM_UNNECESSARY;
1135 ena_increase_stat(&rx_ring->rx_stats.csum_good, 1,
1138 ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1,
1140 skb->ip_summed = CHECKSUM_NONE;
1143 skb->ip_summed = CHECKSUM_NONE;
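/* Summary of the decisions above: CHECKSUM_UNNECESSARY is set only when the
 * device reports the L4 checksum as both checked and valid; every other case
 * (disabled offload, fragments, checksum errors, unchecked) falls back to
 * CHECKSUM_NONE and lets the stack verify in software.
 */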
1149 static void ena_set_rx_hash(struct ena_ring *rx_ring,
1150 struct ena_com_rx_ctx *ena_rx_ctx,
1151 struct sk_buff *skb)
1153 enum pkt_hash_types hash_type;
1155 if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
1156 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1157 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
1159 hash_type = PKT_HASH_TYPE_L4;
1161 hash_type = PKT_HASH_TYPE_NONE;
1163 /* Override hash type if the packet is fragmented */
1164 if (ena_rx_ctx->frag)
1165 hash_type = PKT_HASH_TYPE_NONE;
1167 skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
1171 static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs)
1173 struct ena_rx_buffer *rx_info;
1176 /* XDP multi-buffer packets not supported */
1177 if (unlikely(num_descs > 1)) {
1178 netdev_err_once(rx_ring->adapter->netdev,
1179 "xdp: dropped unsupported multi-buffer packets\n");
1180 ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp);
1181 return ENA_XDP_DROP;
1184 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
1185 xdp_prepare_buff(xdp, page_address(rx_info->page),
1186 rx_info->buf_offset,
1187 rx_ring->ena_bufs[0].len, false);
1189 ret = ena_xdp_execute(rx_ring, xdp);
1191 /* The xdp program might expand the headers */
1192 if (ret == ENA_XDP_PASS) {
1193 rx_info->buf_offset = xdp->data - xdp->data_hard_start;
1194 rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
1200 /* ena_clean_rx_irq - Cleanup RX irq
1201 * @rx_ring: RX ring to clean
1202 * @napi: napi handler
1203 * @budget: how many packets driver is allowed to clean
1205 * Returns the number of cleaned buffers.
1207 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
1210 u16 next_to_clean = rx_ring->next_to_clean;
1211 struct ena_com_rx_ctx ena_rx_ctx;
1212 struct ena_rx_buffer *rx_info;
1213 struct ena_adapter *adapter;
1214 u32 res_budget, work_done;
1215 int rx_copybreak_pkt = 0;
1216 int refill_threshold;
1217 struct sk_buff *skb;
1218 int refill_required;
1219 struct xdp_buff xdp;
1227 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1228 "%s qid %d\n", __func__, rx_ring->qid);
1229 res_budget = budget;
1230 xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
1233 xdp_verdict = ENA_XDP_PASS;
1235 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
1236 ena_rx_ctx.max_bufs = rx_ring->sgl_size;
1237 ena_rx_ctx.descs = 0;
1238 ena_rx_ctx.pkt_offset = 0;
1239 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
1240 rx_ring->ena_com_io_sq,
1245 if (unlikely(ena_rx_ctx.descs == 0))
1248 /* First descriptor might have an offset set by the device */
1249 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
1250 pkt_offset = ena_rx_ctx.pkt_offset;
1251 rx_info->buf_offset += pkt_offset;
1253 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1254 "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
1255 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
1256 ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
1258 dma_sync_single_for_cpu(rx_ring->dev,
1259 dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
1260 rx_ring->ena_bufs[0].len,
1263 if (ena_xdp_present_ring(rx_ring))
1264 xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs);
1266 /* allocate skb and fill it */
1267 if (xdp_verdict == ENA_XDP_PASS)
1268 skb = ena_rx_skb(rx_ring,
1273 if (unlikely(!skb)) {
1274 for (i = 0; i < ena_rx_ctx.descs; i++) {
1275 int req_id = rx_ring->ena_bufs[i].req_id;
1277 rx_ring->free_ids[next_to_clean] = req_id;
1279 ENA_RX_RING_IDX_NEXT(next_to_clean,
1280 rx_ring->ring_size);
1282 /* Packet was passed for transmission, unmap it from the RX side
1285 if (xdp_verdict & ENA_XDP_FORWARDED) {
1286 ena_unmap_rx_buff_attrs(rx_ring,
1287 &rx_ring->rx_buffer_info[req_id],
1288 DMA_ATTR_SKIP_CPU_SYNC);
1289 rx_ring->rx_buffer_info[req_id].page = NULL;
1292 if (xdp_verdict != ENA_XDP_PASS) {
1293 xdp_flags |= xdp_verdict;
1294 total_len += ena_rx_ctx.ena_bufs[0].len;
1301 ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);
1303 ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);
1305 skb_record_rx_queue(skb, rx_ring->qid);
1307 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak)
1310 total_len += skb->len;
1312 napi_gro_receive(napi, skb);
1315 } while (likely(res_budget));
1317 work_done = budget - res_budget;
1318 rx_ring->per_napi_packets += work_done;
1319 u64_stats_update_begin(&rx_ring->syncp);
1320 rx_ring->rx_stats.bytes += total_len;
1321 rx_ring->rx_stats.cnt += work_done;
1322 rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
1323 u64_stats_update_end(&rx_ring->syncp);
1325 rx_ring->next_to_clean = next_to_clean;
1327 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
1329 min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
1330 ENA_RX_REFILL_THRESH_PACKET);
1332 /* Optimization, try to batch new rx buffers */
1333 if (refill_required > refill_threshold)
1334 ena_refill_rx_bufs(rx_ring, refill_required);
1336 if (xdp_flags & ENA_XDP_REDIRECT)
1342 if (xdp_flags & ENA_XDP_REDIRECT)
1345 adapter = netdev_priv(rx_ring->netdev);
1347 if (rc == -ENOSPC) {
1348 ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
1349 ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
1351 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
1353 ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
1358 static void ena_dim_work(struct work_struct *w)
1360 struct dim *dim = container_of(w, struct dim, work);
1361 struct dim_cq_moder cur_moder =
1362 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
1363 struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);
1365 ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
1366 dim->state = DIM_START_MEASURE;
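/* DIM flow (sketch): ena_io_poll() feeds per-NAPI samples to net_dim(); when
 * the algorithm picks a new moderation profile it schedules this work, which
 * caches the new interval in the rx_ring. The value takes effect the next
 * time the interrupt is unmasked in ena_unmask_interrupt().
 */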
1369 static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
1371 struct dim_sample dim_sample;
1372 struct ena_ring *rx_ring = ena_napi->rx_ring;
1374 if (!rx_ring->per_napi_packets)
1377 rx_ring->non_empty_napi_events++;
1379 dim_update_sample(rx_ring->non_empty_napi_events,
1380 rx_ring->rx_stats.cnt,
1381 rx_ring->rx_stats.bytes,
1384 net_dim(&ena_napi->dim, dim_sample);
1386 rx_ring->per_napi_packets = 0;
1389 void ena_unmask_interrupt(struct ena_ring *tx_ring,
1390 struct ena_ring *rx_ring)
1392 u32 rx_interval = tx_ring->smoothed_interval;
1393 struct ena_eth_io_intr_reg intr_reg;
1395 /* Rx ring can be NULL for XDP tx queues which don't have an
1396 * accompanying rx_ring pair.
1399 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
1400 rx_ring->smoothed_interval :
1401 ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);
1403 /* Update intr register: rx intr delay,
1404 * tx intr delay and interrupt unmask
1406 ena_com_update_intr_reg(&intr_reg,
1408 tx_ring->smoothed_interval,
1411 ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1,
1414 /* It is a shared MSI-X vector.
1415 * The Tx and Rx CQs both hold a pointer to it,
1416 * so we use one of them to reach the intr reg.
1417 * The Tx ring is used because rx_ring is NULL for XDP queues
1419 ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
1422 void ena_update_ring_numa_node(struct ena_ring *tx_ring,
1423 struct ena_ring *rx_ring)
1425 int cpu = get_cpu();
1428 /* Check only one ring since the 2 rings are running on the same cpu */
1429 if (likely(tx_ring->cpu == cpu))
1436 numa_node = cpu_to_node(cpu);
1438 if (likely(tx_ring->numa_node == numa_node))
1443 if (numa_node != NUMA_NO_NODE) {
1444 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
1445 tx_ring->numa_node = numa_node;
1447 rx_ring->numa_node = numa_node;
1448 ena_com_update_numa_node(rx_ring->ena_com_io_cq,
1458 static int ena_io_poll(struct napi_struct *napi, int budget)
1460 struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
1461 struct ena_ring *tx_ring, *rx_ring;
1463 int rx_work_done = 0;
1465 int napi_comp_call = 0;
1468 tx_ring = ena_napi->tx_ring;
1469 rx_ring = ena_napi->rx_ring;
1471 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
1473 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1474 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
1475 napi_complete_done(napi, 0);
1479 tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
1480 /* On netpoll the budget is zero and the handler should only clean the tx completions */
1484 rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
1486 /* If the device is about to reset or is down, avoid unmasking
1487 * the interrupt and return 0 so NAPI won't reschedule
1489 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1490 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
1491 napi_complete_done(napi, 0);
1494 } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
1497 /* Update numa and unmask the interrupt only when scheduled
1498 * from the interrupt context (vs from sk_busy_loop)
1500 if (napi_complete_done(napi, rx_work_done) &&
1501 READ_ONCE(ena_napi->interrupts_masked)) {
1502 smp_rmb(); /* make sure interrupts_masked is read */
1503 WRITE_ONCE(ena_napi->interrupts_masked, false);
1504 /* We apply adaptive moderation on Rx path only.
1505 * Tx uses static interrupt moderation.
1507 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
1508 ena_adjust_adaptive_rx_intr_moderation(ena_napi);
1510 ena_update_ring_numa_node(tx_ring, rx_ring);
1511 ena_unmask_interrupt(tx_ring, rx_ring);
1519 u64_stats_update_begin(&tx_ring->syncp);
1520 tx_ring->tx_stats.napi_comp += napi_comp_call;
1521 tx_ring->tx_stats.tx_poll++;
1522 u64_stats_update_end(&tx_ring->syncp);
1524 tx_ring->tx_stats.last_napi_jiffies = jiffies;
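/* last_napi_jiffies recorded here is what ena_tx_timeout() compares against
 * (time_since_last_napi) to detect NAPI poll starvation.
 */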
1529 static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
1531 struct ena_adapter *adapter = (struct ena_adapter *)data;
1533 ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1535 /* Don't call the aenq handler before probe is done */
1536 if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
1537 ena_com_aenq_intr_handler(adapter->ena_dev, data);
1542 /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
1543 * @irq: interrupt number
1544 * @data: pointer to a network interface private napi device structure
1546 static irqreturn_t ena_intr_msix_io(int irq, void *data)
1548 struct ena_napi *ena_napi = data;
1550 /* Used to check HW health */
1551 WRITE_ONCE(ena_napi->first_interrupt, true);
1553 WRITE_ONCE(ena_napi->interrupts_masked, true);
1554 smp_wmb(); /* write interrupts_masked before calling napi */
1556 napi_schedule_irqoff(&ena_napi->napi);
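/* The smp_wmb() above pairs with the smp_rmb() in ena_io_poll(): the poll
 * routine must observe interrupts_masked = true before it decides to unmask
 * the interrupt again.
 */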
1561 /* Reserve a single MSI-X vector for management (admin + aenq),
1562 * plus one vector for each potential io queue.
1563 * The number of potential io queues is the minimum of what the device
1564 * supports and the number of vCPUs.
1566 static int ena_enable_msix(struct ena_adapter *adapter)
1568 int msix_vecs, irq_cnt;
1570 if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1571 netif_err(adapter, probe, adapter->netdev,
1572 "Error, MSI-X is already enabled\n");
1576 /* Reserve the max msix vectors we might need */
1577 msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
1578 netif_dbg(adapter, probe, adapter->netdev,
1579 "Trying to enable MSI-X, vectors %d\n", msix_vecs);
1581 irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
1582 msix_vecs, PCI_IRQ_MSIX);
1585 netif_err(adapter, probe, adapter->netdev,
1586 "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
1590 if (irq_cnt != msix_vecs) {
1591 netif_notice(adapter, probe, adapter->netdev,
1592 "Enable only %d MSI-X (out of %d), reduce the number of queues\n",
1593 irq_cnt, msix_vecs);
1594 adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
1597 if (ena_init_rx_cpu_rmap(adapter))
1598 netif_warn(adapter, probe, adapter->netdev,
1599 "Failed to map IRQs to CPUs\n");
1601 adapter->msix_vecs = irq_cnt;
1602 set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
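/* Resulting vector layout (assuming the usual ENA convention): one management
 * vector (admin queue + AENQ) plus one vector per IO queue; when fewer
 * vectors are granted, the IO queue count is trimmed to the granted count
 * minus the management vector, as done above.
 */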
1607 static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
1611 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
1612 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
1613 pci_name(adapter->pdev));
1614 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
1615 ena_intr_msix_mgmnt;
1616 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
1617 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
1618 pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
1619 cpu = cpumask_first(cpu_online_mask);
1620 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
1621 cpumask_set_cpu(cpu,
1622 &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
1625 static void ena_setup_io_intr(struct ena_adapter *adapter)
1627 struct net_device *netdev;
1628 int irq_idx, i, cpu;
1631 netdev = adapter->netdev;
1632 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1634 for (i = 0; i < io_queue_count; i++) {
1635 irq_idx = ENA_IO_IRQ_IDX(i);
1636 cpu = i % num_online_cpus();
1638 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
1639 "%s-Tx-Rx-%d", netdev->name, i);
1640 adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
1641 adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
1642 adapter->irq_tbl[irq_idx].vector =
1643 pci_irq_vector(adapter->pdev, irq_idx);
1644 adapter->irq_tbl[irq_idx].cpu = cpu;
1646 cpumask_set_cpu(cpu,
1647 &adapter->irq_tbl[irq_idx].affinity_hint_mask);
1651 static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
1653 unsigned long flags = 0;
1654 struct ena_irq *irq;
1657 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1658 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
1661 netif_err(adapter, probe, adapter->netdev,
1662 "Failed to request admin irq\n");
1666 netif_dbg(adapter, probe, adapter->netdev,
1667 "Set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
1668 irq->affinity_hint_mask.bits[0], irq->vector);
1670 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
1675 static int ena_request_io_irq(struct ena_adapter *adapter)
1677 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1678 unsigned long flags = 0;
1679 struct ena_irq *irq;
1682 if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1683 netif_err(adapter, ifup, adapter->netdev,
1684 "Failed to request I/O IRQ: MSI-X is not enabled\n");
1688 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
1689 irq = &adapter->irq_tbl[i];
1690 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
1693 netif_err(adapter, ifup, adapter->netdev,
1694 "Failed to request I/O IRQ. index %d rc %d\n",
1699 netif_dbg(adapter, ifup, adapter->netdev,
1700 "Set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
1701 i, irq->affinity_hint_mask.bits[0], irq->vector);
1703 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
1709 for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
1710 irq = &adapter->irq_tbl[k];
1711 free_irq(irq->vector, irq->data);
1717 static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
1719 struct ena_irq *irq;
1721 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1722 synchronize_irq(irq->vector);
1723 irq_set_affinity_hint(irq->vector, NULL);
1724 free_irq(irq->vector, irq->data);
1727 static void ena_free_io_irq(struct ena_adapter *adapter)
1729 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1730 struct ena_irq *irq;
1733 #ifdef CONFIG_RFS_ACCEL
1734 if (adapter->msix_vecs >= 1) {
1735 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
1736 adapter->netdev->rx_cpu_rmap = NULL;
1738 #endif /* CONFIG_RFS_ACCEL */
1740 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
1741 irq = &adapter->irq_tbl[i];
1742 irq_set_affinity_hint(irq->vector, NULL);
1743 free_irq(irq->vector, irq->data);
1747 static void ena_disable_msix(struct ena_adapter *adapter)
1749 if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
1750 pci_free_irq_vectors(adapter->pdev);
1753 static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
1755 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1758 if (!netif_running(adapter->netdev))
1761 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
1762 synchronize_irq(adapter->irq_tbl[i].vector);
1765 static void ena_del_napi_in_range(struct ena_adapter *adapter,
1771 for (i = first_index; i < first_index + count; i++) {
1772 netif_napi_del(&adapter->ena_napi[i].napi);
1774 WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
1775 adapter->ena_napi[i].rx_ring);
1779 static void ena_init_napi_in_range(struct ena_adapter *adapter,
1780 int first_index, int count)
1782 int (*napi_handler)(struct napi_struct *napi, int budget);
1785 for (i = first_index; i < first_index + count; i++) {
1786 struct ena_napi *napi = &adapter->ena_napi[i];
1787 struct ena_ring *rx_ring, *tx_ring;
1789 memset(napi, 0, sizeof(*napi));
1791 rx_ring = &adapter->rx_ring[i];
1792 tx_ring = &adapter->tx_ring[i];
1794 napi_handler = ena_io_poll;
1795 if (ENA_IS_XDP_INDEX(adapter, i))
1796 napi_handler = ena_xdp_io_poll;
1798 netif_napi_add(adapter->netdev, &napi->napi, napi_handler);
1800 if (!ENA_IS_XDP_INDEX(adapter, i))
1801 napi->rx_ring = rx_ring;
1803 napi->tx_ring = tx_ring;
1808 static void ena_napi_disable_in_range(struct ena_adapter *adapter,
1814 for (i = first_index; i < first_index + count; i++)
1815 napi_disable(&adapter->ena_napi[i].napi);
1818 static void ena_napi_enable_in_range(struct ena_adapter *adapter,
1824 for (i = first_index; i < first_index + count; i++)
1825 napi_enable(&adapter->ena_napi[i].napi);
1828 /* Configure the Rx forwarding */
1829 static int ena_rss_configure(struct ena_adapter *adapter)
1831 struct ena_com_dev *ena_dev = adapter->ena_dev;
1834 /* In case the RSS table wasn't initialized by probe */
1835 if (!ena_dev->rss.tbl_log_size) {
1836 rc = ena_rss_init_default(adapter);
1837 if (rc && (rc != -EOPNOTSUPP)) {
1838 netif_err(adapter, ifup, adapter->netdev, "Failed to init RSS rc: %d\n", rc);
1843 /* Set indirect table */
1844 rc = ena_com_indirect_table_set(ena_dev);
1845 if (unlikely(rc && rc != -EOPNOTSUPP))
1848 /* Configure hash function (if supported) */
1849 rc = ena_com_set_hash_function(ena_dev);
1850 if (unlikely(rc && (rc != -EOPNOTSUPP)))
1853 /* Configure hash inputs (if supported) */
1854 rc = ena_com_set_hash_ctrl(ena_dev);
1855 if (unlikely(rc && (rc != -EOPNOTSUPP)))
1861 static int ena_up_complete(struct ena_adapter *adapter)
1865 rc = ena_rss_configure(adapter);
1869 ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
1871 ena_refill_all_rx_bufs(adapter);
1873 /* enable transmits */
1874 netif_tx_start_all_queues(adapter->netdev);
1876 ena_napi_enable_in_range(adapter,
1878 adapter->xdp_num_queues + adapter->num_io_queues);
1883 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
1885 struct ena_com_create_io_ctx ctx;
1886 struct ena_com_dev *ena_dev;
1887 struct ena_ring *tx_ring;
1892 ena_dev = adapter->ena_dev;
1894 tx_ring = &adapter->tx_ring[qid];
1895 msix_vector = ENA_IO_IRQ_IDX(qid);
1896 ena_qid = ENA_IO_TXQ_IDX(qid);
1898 memset(&ctx, 0x0, sizeof(ctx));
1900 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1902 ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1903 ctx.msix_vector = msix_vector;
1904 ctx.queue_size = tx_ring->ring_size;
1905 ctx.numa_node = tx_ring->numa_node;
1907 rc = ena_com_create_io_queue(ena_dev, &ctx);
1909 netif_err(adapter, ifup, adapter->netdev,
1910 "Failed to create I/O TX queue num %d rc: %d\n",
1915 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1916 &tx_ring->ena_com_io_sq,
1917 &tx_ring->ena_com_io_cq);
1919 netif_err(adapter, ifup, adapter->netdev,
1920 "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
1922 ena_com_destroy_io_queue(ena_dev, ena_qid);
1926 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
1930 int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
1931 int first_index, int count)
1933 struct ena_com_dev *ena_dev = adapter->ena_dev;
1936 for (i = first_index; i < first_index + count; i++) {
1937 rc = ena_create_io_tx_queue(adapter, i);
1945 while (i-- > first_index)
1946 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
1951 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
1953 struct ena_com_dev *ena_dev;
1954 struct ena_com_create_io_ctx ctx;
1955 struct ena_ring *rx_ring;
1960 ena_dev = adapter->ena_dev;
1962 rx_ring = &adapter->rx_ring[qid];
1963 msix_vector = ENA_IO_IRQ_IDX(qid);
1964 ena_qid = ENA_IO_RXQ_IDX(qid);
1966 memset(&ctx, 0x0, sizeof(ctx));
1969 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1970 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1971 ctx.msix_vector = msix_vector;
1972 ctx.queue_size = rx_ring->ring_size;
1973 ctx.numa_node = rx_ring->numa_node;
1975 rc = ena_com_create_io_queue(ena_dev, &ctx);
1977 netif_err(adapter, ifup, adapter->netdev,
1978 "Failed to create I/O RX queue num %d rc: %d\n",
1983 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1984 &rx_ring->ena_com_io_sq,
1985 &rx_ring->ena_com_io_cq);
1987 netif_err(adapter, ifup, adapter->netdev,
1988 "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
1993 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
1997 ena_com_destroy_io_queue(ena_dev, ena_qid);
2001 static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
2003 struct ena_com_dev *ena_dev = adapter->ena_dev;
2006 for (i = 0; i < adapter->num_io_queues; i++) {
2007 rc = ena_create_io_rx_queue(adapter, i);
2010 INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
2012 ena_xdp_register_rxq_info(&adapter->rx_ring[i]);
2019 ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]);
2020 cancel_work_sync(&adapter->ena_napi[i].dim.work);
2021 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
2027 static void set_io_rings_size(struct ena_adapter *adapter,
2033 for (i = 0; i < adapter->num_io_queues; i++) {
2034 adapter->tx_ring[i].ring_size = new_tx_size;
2035 adapter->rx_ring[i].ring_size = new_rx_size;
2039 /* This function allows queue allocation to backoff when the system is
2040 * low on memory. If there is not enough memory to allocate io queues
2041 * the driver will try to allocate smaller queues.
2043 * The backoff algorithm is as follows:
2044 * 1. Try to allocate TX and RX; if successful,
2045 * 1.1. return success.
2047 * 2. Divide by 2 the size of the larger of the RX and TX queues (or both if their size is the same).
2049 * 3. If TX or RX is smaller than 256,
2050 * 3.1. return failure.
2052 * 4. Otherwise, go back to 1.
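* Illustrative run of the backoff (sizes are examples only):
*   requested TX=8192, RX=1024 -> -ENOMEM -> TX halved to 4096
*   TX=4096, RX=1024 -> -ENOMEM -> TX halved to 2048
*   TX=1024, RX=1024 -> -ENOMEM -> both halved to 512
*   the retries stop once either size would drop below the 256-entry minimum.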
2054 static int create_queues_with_size_backoff(struct ena_adapter *adapter)
2056 int rc, cur_rx_ring_size, cur_tx_ring_size;
2057 int new_rx_ring_size, new_tx_ring_size;
2059 /* current queue sizes might be set to smaller than the requested
2060 * ones due to past queue allocation failures.
2062 set_io_rings_size(adapter, adapter->requested_tx_ring_size,
2063 adapter->requested_rx_ring_size);
2066 if (ena_xdp_present(adapter)) {
2067 rc = ena_setup_and_create_all_xdp_queues(adapter);
2072 rc = ena_setup_tx_resources_in_range(adapter,
2074 adapter->num_io_queues);
2078 rc = ena_create_io_tx_queues_in_range(adapter,
2080 adapter->num_io_queues);
2082 goto err_create_tx_queues;
2084 rc = ena_setup_all_rx_resources(adapter);
2088 rc = ena_create_all_io_rx_queues(adapter);
2090 goto err_create_rx_queues;
2094 err_create_rx_queues:
2095 ena_free_all_io_rx_resources(adapter);
2097 ena_destroy_all_tx_queues(adapter);
2098 err_create_tx_queues:
2099 ena_free_all_io_tx_resources(adapter);
2101 if (rc != -ENOMEM) {
2102 netif_err(adapter, ifup, adapter->netdev,
2103 "Queue creation failed with error code %d\n",
2108 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2109 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2111 netif_err(adapter, ifup, adapter->netdev,
2112 "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2113 cur_tx_ring_size, cur_rx_ring_size);
2115 new_tx_ring_size = cur_tx_ring_size;
2116 new_rx_ring_size = cur_rx_ring_size;
2118 /* Decrease the size of the larger queue, or
2119 * decrease both if they are the same size.
2121 if (cur_rx_ring_size <= cur_tx_ring_size)
2122 new_tx_ring_size = cur_tx_ring_size / 2;
2123 if (cur_rx_ring_size >= cur_tx_ring_size)
2124 new_rx_ring_size = cur_rx_ring_size / 2;
2126 if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
2127 new_rx_ring_size < ENA_MIN_RING_SIZE) {
2128 netif_err(adapter, ifup, adapter->netdev,
2129 "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
2134 netif_err(adapter, ifup, adapter->netdev,
2135 "Retrying queue creation with sizes TX=%d, RX=%d\n",
2139 set_io_rings_size(adapter, new_tx_ring_size,
2144 int ena_up(struct ena_adapter *adapter)
2146 int io_queue_count, rc, i;
2148 netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
2150 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2151 ena_setup_io_intr(adapter);
2153 /* napi poll functions should be initialized before running
2154 * request_irq(), to handle a rare condition where there is a pending
2155 * interrupt, causing the ISR to fire immediately while the poll
2156 * function hasn't been set yet, which would cause a NULL dereference
2158 ena_init_napi_in_range(adapter, 0, io_queue_count);
2160 /* Enabling DIM needs to happen before enabling IRQs since DIM
2161 * is run from the napi routine
2163 if (ena_com_interrupt_moderation_supported(adapter->ena_dev))
2164 ena_com_enable_adaptive_moderation(adapter->ena_dev);
2166 rc = ena_request_io_irq(adapter);
2170 rc = create_queues_with_size_backoff(adapter);
2172 goto err_create_queues_with_backoff;
2174 rc = ena_up_complete(adapter);
2178 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2179 netif_carrier_on(adapter->netdev);
2181 ena_increase_stat(&adapter->dev_stats.interface_up, 1,
2184 set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2186 /* Enable completion queues interrupt */
2187 for (i = 0; i < adapter->num_io_queues; i++)
2188 ena_unmask_interrupt(&adapter->tx_ring[i],
2189 &adapter->rx_ring[i]);
2191 /* schedule napi in case we had pending packets
2192 * from the last time we disabled napi
2194 for (i = 0; i < io_queue_count; i++)
2195 napi_schedule(&adapter->ena_napi[i].napi);
2200 ena_destroy_all_tx_queues(adapter);
2201 ena_free_all_io_tx_resources(adapter);
2202 ena_destroy_all_rx_queues(adapter);
2203 ena_free_all_io_rx_resources(adapter);
2204 err_create_queues_with_backoff:
2205 ena_free_io_irq(adapter);
2207 ena_del_napi_in_range(adapter, 0, io_queue_count);
2212 void ena_down(struct ena_adapter *adapter)
2214 int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2216 netif_dbg(adapter, ifdown, adapter->netdev, "%s\n", __func__);
2218 clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2220 ena_increase_stat(&adapter->dev_stats.interface_down, 1,
2223 netif_carrier_off(adapter->netdev);
2224 netif_tx_disable(adapter->netdev);
2226 /* After this point the napi handler won't enable the tx queue */
2227 ena_napi_disable_in_range(adapter, 0, io_queue_count);
2229 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
2232 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2234 netif_err(adapter, ifdown, adapter->netdev,
2235 "Device reset failed\n");
2236 /* stop submitting admin commands on a device that was reset */
2237 ena_com_set_admin_running_state(adapter->ena_dev, false);
2240 ena_destroy_all_io_queues(adapter);
2242 ena_disable_io_intr_sync(adapter);
2243 ena_free_io_irq(adapter);
2244 ena_del_napi_in_range(adapter, 0, io_queue_count);
2246 ena_free_all_tx_bufs(adapter);
2247 ena_free_all_rx_bufs(adapter);
2248 ena_free_all_io_tx_resources(adapter);
2249 ena_free_all_io_rx_resources(adapter);
2252 /* ena_open - Called when a network interface is made active
2253 * @netdev: network interface device structure
2255 * Returns 0 on success, negative value on failure
2257 * The open entry point is called when a network interface is made
2258 * active by the system (IFF_UP). At this point all resources needed
2259 * for transmit and receive operations are allocated, the interrupt
2260 * handler is registered with the OS, the watchdog timer is started,
2261 * and the stack is notified that the interface is ready.
2263 static int ena_open(struct net_device *netdev)
2265 struct ena_adapter *adapter = netdev_priv(netdev);
2268 /* Notify the stack of the actual queue counts. */
2269 rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
2271 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
2275 rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
2277 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
2281 rc = ena_up(adapter);
2288 /* ena_close - Disables a network interface
2289 * @netdev: network interface device structure
2291 * Returns 0, this is not allowed to fail
2293 * The close entry point is called when an interface is de-activated
2294 * by the OS. The hardware is still under the drivers control, but
2295 * needs to be disabled. A global MAC reset is issued to stop the
2296 * hardware, and all transmit and receive resources are freed.
2298 static int ena_close(struct net_device *netdev)
2300 struct ena_adapter *adapter = netdev_priv(netdev);
2302 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
2304 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2307 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2310 /* Check the device status and issue a reset if needed */
2311 check_for_admin_com_state(adapter);
2312 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2313 netif_err(adapter, ifdown, adapter->netdev,
2314 "Destroy failure, restarting device\n");
2315 ena_dump_stats_to_dmesg(adapter);
2316 /* rtnl lock already obtained in dev_ioctl() layer */
2317 ena_destroy_device(adapter, false);
2318 ena_restore_device(adapter);
2324 int ena_update_queue_params(struct ena_adapter *adapter,
2327 u32 new_llq_header_len)
2329 bool dev_was_up, large_llq_changed = false;
2332 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2333 ena_close(adapter->netdev);
2334 adapter->requested_tx_ring_size = new_tx_size;
2335 adapter->requested_rx_ring_size = new_rx_size;
2336 ena_init_io_rings(adapter,
2338 adapter->xdp_num_queues +
2339 adapter->num_io_queues);
2341 large_llq_changed = adapter->ena_dev->tx_mem_queue_type ==
2342 ENA_ADMIN_PLACEMENT_POLICY_DEV;
2343 large_llq_changed &=
2344 new_llq_header_len != adapter->ena_dev->tx_max_header_size;
2346 /* A check that the configuration is valid is done by the caller */
2347 if (large_llq_changed) {
2348 adapter->large_llq_header_enabled = !adapter->large_llq_header_enabled;
2350 ena_destroy_device(adapter, false);
2351 rc = ena_restore_device(adapter);
2354 return dev_was_up && !rc ? ena_up(adapter) : rc;
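/* ena_set_rx_copybreak() below updates the copybreak threshold: received
 * frames no larger than this are copied into a freshly allocated skb instead
 * of handing the ring page to the stack. The value is validated against the
 * smaller of the netdev MTU and ENA_PAGE_SIZE and then propagated to every
 * Rx ring.
 */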
2357 int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak)
2359 struct ena_ring *rx_ring;
2362 if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE))
2365 adapter->rx_copybreak = rx_copybreak;
2367 for (i = 0; i < adapter->num_io_queues; i++) {
2368 rx_ring = &adapter->rx_ring[i];
2369 rx_ring->rx_copybreak = rx_copybreak;
2375 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
2377 struct ena_com_dev *ena_dev = adapter->ena_dev;
2378 int prev_channel_count;
2381 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2382 ena_close(adapter->netdev);
2383 prev_channel_count = adapter->num_io_queues;
2384 adapter->num_io_queues = new_channel_count;
2385 if (ena_xdp_present(adapter) &&
2386 ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
2387 adapter->xdp_first_ring = new_channel_count;
2388 adapter->xdp_num_queues = new_channel_count;
2389 if (prev_channel_count > new_channel_count)
2390 ena_xdp_exchange_program_rx_in_range(adapter,
2393 prev_channel_count);
2395 ena_xdp_exchange_program_rx_in_range(adapter,
2396 adapter->xdp_bpf_prog,
2401 /* We need to destroy the rss table so that the indirection
2402 * table will be reinitialized by ena_up()
2404 ena_com_rss_destroy(ena_dev);
2405 ena_init_io_rings(adapter,
2407 adapter->xdp_num_queues +
2408 adapter->num_io_queues);
2409 return dev_was_up ? ena_open(adapter->netdev) : 0;
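/* ena_tx_csum() below translates the skb's checksum/GSO state into the Tx
 * context: TSO and L4 checksum enable bits, L3/L4 protocol identifiers, and
 * the header lengths/offsets carried by the meta descriptor. When neither
 * checksum offload nor TSO is requested, a (zeroed) meta descriptor is still
 * sent if meta caching is disabled; otherwise meta_valid is left clear.
 */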
2412 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx,
2413 struct sk_buff *skb,
2414 bool disable_meta_caching)
2416 u32 mss = skb_shinfo(skb)->gso_size;
2417 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
2420 if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
2421 ena_tx_ctx->l4_csum_enable = 1;
2423 ena_tx_ctx->tso_enable = 1;
2424 ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
2425 ena_tx_ctx->l4_csum_partial = 0;
2427 ena_tx_ctx->tso_enable = 0;
2428 ena_meta->l4_hdr_len = 0;
2429 ena_tx_ctx->l4_csum_partial = 1;
2432 switch (ip_hdr(skb)->version) {
2434 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
2435 if (ip_hdr(skb)->frag_off & htons(IP_DF))
2438 ena_tx_ctx->l3_csum_enable = 1;
2439 l4_protocol = ip_hdr(skb)->protocol;
2442 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
2443 l4_protocol = ipv6_hdr(skb)->nexthdr;
2449 if (l4_protocol == IPPROTO_TCP)
2450 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
2452 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
2454 ena_meta->mss = mss;
2455 ena_meta->l3_hdr_len = skb_network_header_len(skb);
2456 ena_meta->l3_hdr_offset = skb_network_offset(skb);
2457 ena_tx_ctx->meta_valid = 1;
2458 } else if (disable_meta_caching) {
2459 memset(ena_meta, 0, sizeof(*ena_meta));
2460 ena_tx_ctx->meta_valid = 1;
2462 ena_tx_ctx->meta_valid = 0;
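/* ena_check_and_linearize_skb() below falls back to skb_linearize() when an
 * skb needs more buffers than the ring's SGL can describe: more fragments
 * than sgl_size, or exactly sgl_size fragments with a linear head that does
 * not fit in the pushed header area. Linearization attempts and failures are
 * counted in the ring's tx_stats.
 */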
2466 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
2467 struct sk_buff *skb)
2469 int num_frags, header_len, rc;
2471 num_frags = skb_shinfo(skb)->nr_frags;
2472 header_len = skb_headlen(skb);
2474 if (num_frags < tx_ring->sgl_size)
2477 if ((num_frags == tx_ring->sgl_size) &&
2478 (header_len < tx_ring->tx_max_header_size))
2481 ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp);
2483 rc = skb_linearize(skb);
2485 ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1,
2492 static int ena_tx_map_skb(struct ena_ring *tx_ring,
2493 struct ena_tx_buffer *tx_info,
2494 struct sk_buff *skb,
2498 struct ena_adapter *adapter = tx_ring->adapter;
2499 struct ena_com_buf *ena_buf;
2501 u32 skb_head_len, frag_len, last_frag;
2506 skb_head_len = skb_headlen(skb);
2508 ena_buf = tx_info->bufs;
2510 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2511 /* When the device is in LLQ mode, the driver will copy
2512 * the header into the device memory space.
2513 * The ena_com layer assumes the header is in a linear
2515 * memory space. This assumption might be wrong since part of the header
2516 * can be in the fragmented buffers.
2517 * Use skb_header_pointer to make sure the header is in a
2518 * linear memory space.
2521 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
2522 *push_hdr = skb_header_pointer(skb, 0, push_len,
2523 tx_ring->push_buf_intermediate_buf);
2524 *header_len = push_len;
2525 if (unlikely(skb->data != *push_hdr)) {
2526 ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1,
2529 delta = push_len - skb_head_len;
2533 *header_len = min_t(u32, skb_head_len,
2534 tx_ring->tx_max_header_size);
2537 netif_dbg(adapter, tx_queued, adapter->netdev,
2538 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
2539 *push_hdr, push_len);
2541 if (skb_head_len > push_len) {
2542 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
2543 skb_head_len - push_len, DMA_TO_DEVICE);
2544 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2545 goto error_report_dma_error;
2547 ena_buf->paddr = dma;
2548 ena_buf->len = skb_head_len - push_len;
2551 tx_info->num_of_bufs++;
2552 tx_info->map_linear_data = 1;
2554 tx_info->map_linear_data = 0;
2557 last_frag = skb_shinfo(skb)->nr_frags;
2559 for (i = 0; i < last_frag; i++) {
2560 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2562 frag_len = skb_frag_size(frag);
2564 if (unlikely(delta >= frag_len)) {
2569 dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
2570 frag_len - delta, DMA_TO_DEVICE);
2571 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2572 goto error_report_dma_error;
2574 ena_buf->paddr = dma;
2575 ena_buf->len = frag_len - delta;
2577 tx_info->num_of_bufs++;
2583 error_report_dma_error:
2584 ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
2586 netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n");
2588 tx_info->skb = NULL;
2590 tx_info->num_of_bufs += i;
2591 ena_unmap_tx_buff(tx_ring, tx_info);
2596 /* Called with netif_tx_lock. */
2597 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2599 struct ena_adapter *adapter = netdev_priv(dev);
2600 struct ena_tx_buffer *tx_info;
2601 struct ena_com_tx_ctx ena_tx_ctx;
2602 struct ena_ring *tx_ring;
2603 struct netdev_queue *txq;
2605 u16 next_to_use, req_id, header_len;
2608 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2609 /* Determine which tx ring this skb will be placed on */
2610 qid = skb_get_queue_mapping(skb);
2611 tx_ring = &adapter->tx_ring[qid];
2612 txq = netdev_get_tx_queue(dev, qid);
2614 rc = ena_check_and_linearize_skb(tx_ring, skb);
2616 goto error_drop_packet;
2618 next_to_use = tx_ring->next_to_use;
2619 req_id = tx_ring->free_ids[next_to_use];
2620 tx_info = &tx_ring->tx_buffer_info[req_id];
2621 tx_info->num_of_bufs = 0;
2623 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
2625 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
2627 goto error_drop_packet;
2629 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2630 ena_tx_ctx.ena_bufs = tx_info->bufs;
2631 ena_tx_ctx.push_header = push_hdr;
2632 ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2633 ena_tx_ctx.req_id = req_id;
2634 ena_tx_ctx.header_len = header_len;
2636 /* Set flags and metadata */
2637 ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
2639 rc = ena_xmit_common(adapter,
2646 goto error_unmap_dma;
2648 netdev_tx_sent_queue(txq, skb->len);
2650 /* Stop the queue when no more space is available. The packet can require up
2651 * to sgl_size + 2 descriptors: one for the meta descriptor and one for the header
2652 * (if the header is larger than tx_max_header_size).
2654 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2655 tx_ring->sgl_size + 2))) {
2656 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
2659 netif_tx_stop_queue(txq);
2660 ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1,
2663 /* There is a rare condition where this function decides to
2664 * stop the queue but meanwhile clean_tx_irq updates
2665 * next_to_completion and terminates.
2666 * The queue would then remain stopped forever.
2667 * To solve this issue add a mb() to make sure that the
2668 * netif_tx_stop_queue() write is visible before checking if
2669 * there is additional space in the queue.
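 * (This presumably pairs with a corresponding barrier on the Tx completion
 * path before it re-checks the stopped state, so at least one side observes
 * the other's update.)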
2673 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2674 ENA_TX_WAKEUP_THRESH)) {
2675 netif_tx_wake_queue(txq);
2676 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
2681 skb_tx_timestamp(skb);
2683 if (netif_xmit_stopped(txq) || !netdev_xmit_more())
2684 /* Trigger the DMA engine. ena_ring_tx_doorbell()
2685 * issues a memory barrier internally.
2687 ena_ring_tx_doorbell(tx_ring);
2689 return NETDEV_TX_OK;
2692 ena_unmap_tx_buff(tx_ring, tx_info);
2693 tx_info->skb = NULL;
2697 return NETDEV_TX_OK;
2700 static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
2702 struct device *dev = &pdev->dev;
2703 struct ena_admin_host_info *host_info;
2706 /* Allocate only the host info */
2707 rc = ena_com_allocate_host_info(ena_dev);
2709 dev_err(dev, "Cannot allocate host info\n");
2713 host_info = ena_dev->host_attr.host_info;
2715 host_info->bdf = pci_dev_id(pdev);
2716 host_info->os_type = ENA_ADMIN_OS_LINUX;
2717 host_info->kernel_ver = LINUX_VERSION_CODE;
2718 strscpy(host_info->kernel_ver_str, utsname()->version,
2719 sizeof(host_info->kernel_ver_str) - 1);
2720 host_info->os_dist = 0;
2721 strscpy(host_info->os_dist_str, utsname()->release,
2722 sizeof(host_info->os_dist_str));
2723 host_info->driver_version =
2724 (DRV_MODULE_GEN_MAJOR) |
2725 (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2726 (DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
2727 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
2728 host_info->num_cpus = num_online_cpus();
2730 host_info->driver_supported_features =
2731 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
2732 ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
2733 ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
2734 ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK |
2735 ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK;
2737 rc = ena_com_set_host_attributes(ena_dev);
2739 if (rc == -EOPNOTSUPP)
2740 dev_warn(dev, "Cannot set host attributes\n");
2742 dev_err(dev, "Cannot set host attributes\n");
2750 ena_com_delete_host_info(ena_dev);
2753 static void ena_config_debug_area(struct ena_adapter *adapter)
2755 u32 debug_area_size;
2758 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
2759 if (ss_count <= 0) {
2760 netif_err(adapter, drv, adapter->netdev,
2761 "SS count is negative\n");
2765 /* Allocate 32 bytes for each string and 64 bits for the value */
2766 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
2768 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
2770 netif_err(adapter, drv, adapter->netdev,
2771 "Cannot allocate debug area\n");
2775 rc = ena_com_set_host_attributes(adapter->ena_dev);
2777 if (rc == -EOPNOTSUPP)
2778 netif_warn(adapter, drv, adapter->netdev, "Cannot set host attributes\n");
2780 netif_err(adapter, drv, adapter->netdev,
2781 "Cannot set host attributes\n");
2787 ena_com_delete_debug_area(adapter->ena_dev);
2790 int ena_update_hw_stats(struct ena_adapter *adapter)
2794 rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
2796 netdev_err(adapter->netdev, "Failed to get ENI stats\n");
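/* ena_get_stats64() below aggregates the per-ring counters into the netdev
 * stats. Each ring's 64-bit counters are read inside a u64_stats fetch/retry
 * loop so they stay consistent even on 32-bit hosts, and the device-reported
 * rx/tx drop counts (updated from keep-alive events) are added on top.
 */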
2803 static void ena_get_stats64(struct net_device *netdev,
2804 struct rtnl_link_stats64 *stats)
2806 struct ena_adapter *adapter = netdev_priv(netdev);
2807 struct ena_ring *rx_ring, *tx_ring;
2808 u64 total_xdp_rx_drops = 0;
2814 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2817 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
2818 u64 bytes, packets, xdp_rx_drops;
2820 tx_ring = &adapter->tx_ring[i];
2823 start = u64_stats_fetch_begin(&tx_ring->syncp);
2824 packets = tx_ring->tx_stats.cnt;
2825 bytes = tx_ring->tx_stats.bytes;
2826 } while (u64_stats_fetch_retry(&tx_ring->syncp, start));
2828 stats->tx_packets += packets;
2829 stats->tx_bytes += bytes;
2831 /* In XDP there isn't an RX queue counterpart */
2832 if (ENA_IS_XDP_INDEX(adapter, i))
2835 rx_ring = &adapter->rx_ring[i];
2838 start = u64_stats_fetch_begin(&rx_ring->syncp);
2839 packets = rx_ring->rx_stats.cnt;
2840 bytes = rx_ring->rx_stats.bytes;
2841 xdp_rx_drops = rx_ring->rx_stats.xdp_drop;
2842 } while (u64_stats_fetch_retry(&rx_ring->syncp, start));
2844 stats->rx_packets += packets;
2845 stats->rx_bytes += bytes;
2846 total_xdp_rx_drops += xdp_rx_drops;
2850 start = u64_stats_fetch_begin(&adapter->syncp);
2851 rx_drops = adapter->dev_stats.rx_drops;
2852 tx_drops = adapter->dev_stats.tx_drops;
2853 } while (u64_stats_fetch_retry(&adapter->syncp, start));
2855 stats->rx_dropped = rx_drops + total_xdp_rx_drops;
2856 stats->tx_dropped = tx_drops;
2858 stats->multicast = 0;
2859 stats->collisions = 0;
2861 stats->rx_length_errors = 0;
2862 stats->rx_crc_errors = 0;
2863 stats->rx_frame_errors = 0;
2864 stats->rx_fifo_errors = 0;
2865 stats->rx_missed_errors = 0;
2866 stats->tx_window_errors = 0;
2868 stats->rx_errors = 0;
2869 stats->tx_errors = 0;
2872 static const struct net_device_ops ena_netdev_ops = {
2873 .ndo_open = ena_open,
2874 .ndo_stop = ena_close,
2875 .ndo_start_xmit = ena_start_xmit,
2876 .ndo_get_stats64 = ena_get_stats64,
2877 .ndo_tx_timeout = ena_tx_timeout,
2878 .ndo_change_mtu = ena_change_mtu,
2879 .ndo_validate_addr = eth_validate_addr,
2881 .ndo_xdp_xmit = ena_xdp_xmit,
2884 static int ena_calc_io_queue_size(struct ena_adapter *adapter,
2885 struct ena_com_dev_get_features_ctx *get_feat_ctx)
2887 struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
2888 struct ena_com_dev *ena_dev = adapter->ena_dev;
2889 u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
2890 u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
2891 u32 max_tx_queue_size;
2892 u32 max_rx_queue_size;
2894 /* If this function is called after driver load, the ring sizes have already
2895 * been configured. Take it into account when recalculating ring size.
2897 if (adapter->tx_ring->ring_size)
2898 tx_queue_size = adapter->tx_ring->ring_size;
2900 if (adapter->rx_ring->ring_size)
2901 rx_queue_size = adapter->rx_ring->ring_size;
2903 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2904 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2905 &get_feat_ctx->max_queue_ext.max_queue_ext;
2906 max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
2907 max_queue_ext->max_rx_sq_depth);
2908 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
2910 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
2911 max_tx_queue_size = min_t(u32, max_tx_queue_size,
2912 llq->max_llq_depth);
2914 max_tx_queue_size = min_t(u32, max_tx_queue_size,
2915 max_queue_ext->max_tx_sq_depth);
2917 adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
2918 max_queue_ext->max_per_packet_tx_descs);
2919 adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
2920 max_queue_ext->max_per_packet_rx_descs);
2922 struct ena_admin_queue_feature_desc *max_queues =
2923 &get_feat_ctx->max_queues;
2924 max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
2925 max_queues->max_sq_depth);
2926 max_tx_queue_size = max_queues->max_cq_depth;
2928 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
2929 max_tx_queue_size = min_t(u32, max_tx_queue_size,
2930 llq->max_llq_depth);
2932 max_tx_queue_size = min_t(u32, max_tx_queue_size,
2933 max_queues->max_sq_depth);
2935 adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
2936 max_queues->max_packet_tx_descs);
2937 adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
2938 max_queues->max_packet_rx_descs);
2941 max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
2942 max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
2944 if (max_tx_queue_size < ENA_MIN_RING_SIZE) {
2945 netdev_err(adapter->netdev, "Device max TX queue size: %d < minimum: %d\n",
2946 max_tx_queue_size, ENA_MIN_RING_SIZE);
2950 if (max_rx_queue_size < ENA_MIN_RING_SIZE) {
2951 netdev_err(adapter->netdev, "Device max RX queue size: %d < minimum: %d\n",
2952 max_rx_queue_size, ENA_MIN_RING_SIZE);
2956 /* When forcing large headers, we multiply the entry size by 2, and therefore divide
2957 * the queue size by 2, leaving the amount of memory used by the queues unchanged.
2959 if (adapter->large_llq_header_enabled) {
2960 if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
2961 ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2962 max_tx_queue_size /= 2;
2963 dev_info(&adapter->pdev->dev,
2964 "Forcing large headers and decreasing maximum TX queue size to %d\n",
2967 dev_err(&adapter->pdev->dev,
2968 "Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
2970 adapter->large_llq_header_enabled = false;
2974 tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
2976 rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
2979 tx_queue_size = rounddown_pow_of_two(tx_queue_size);
2980 rx_queue_size = rounddown_pow_of_two(rx_queue_size);
2982 adapter->max_tx_ring_size = max_tx_queue_size;
2983 adapter->max_rx_ring_size = max_rx_queue_size;
2984 adapter->requested_tx_ring_size = tx_queue_size;
2985 adapter->requested_rx_ring_size = rx_queue_size;
2990 static int ena_device_validate_params(struct ena_adapter *adapter,
2991 struct ena_com_dev_get_features_ctx *get_feat_ctx)
2993 struct net_device *netdev = adapter->netdev;
2996 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
2999 netif_err(adapter, drv, netdev,
3000 "Error, mac address are different\n");
3004 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
3005 netif_err(adapter, drv, netdev,
3006 "Error, device max mtu is smaller than netdev MTU\n");
3013 static void set_default_llq_configurations(struct ena_adapter *adapter,
3014 struct ena_llq_configurations *llq_config,
3015 struct ena_admin_feature_llq_desc *llq)
3017 struct ena_com_dev *ena_dev = adapter->ena_dev;
3019 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
3020 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
3021 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
3023 adapter->large_llq_header_supported =
3024 !!(ena_dev->supported_features & BIT(ENA_ADMIN_LLQ));
3025 adapter->large_llq_header_supported &=
3026 !!(llq->entry_size_ctrl_supported &
3027 ENA_ADMIN_LIST_ENTRY_SIZE_256B);
3029 if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
3030 adapter->large_llq_header_enabled) {
3031 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
3032 llq_config->llq_ring_entry_size_value = 256;
3034 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
3035 llq_config->llq_ring_entry_size_value = 128;
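/* A 256B LLQ entry leaves room for pushing larger headers to the device at
 * the cost of halving the maximum Tx queue size (see ena_calc_io_queue_size()
 * above); the default stays at 128B entries unless large LLQ headers were
 * explicitly enabled and the device advertises 256B entry support.
 */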
3039 static int ena_set_queues_placement_policy(struct pci_dev *pdev,
3040 struct ena_com_dev *ena_dev,
3041 struct ena_admin_feature_llq_desc *llq,
3042 struct ena_llq_configurations *llq_default_configurations)
3045 u32 llq_feature_mask;
3047 llq_feature_mask = 1 << ENA_ADMIN_LLQ;
3048 if (!(ena_dev->supported_features & llq_feature_mask)) {
3049 dev_warn(&pdev->dev,
3050 "LLQ is not supported Fallback to host mode policy.\n");
3051 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3055 if (!ena_dev->mem_bar) {
3056 netdev_err(ena_dev->net_device,
3057 "LLQ is advertised as supported but device doesn't expose mem bar\n");
3058 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3062 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
3065 "Failed to configure the device mode. Fallback to host mode policy.\n");
3066 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3072 static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
3075 bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));
3080 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3081 pci_resource_start(pdev, ENA_MEM_BAR),
3082 pci_resource_len(pdev, ENA_MEM_BAR));
3084 if (!ena_dev->mem_bar)
3090 static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
3091 struct ena_com_dev_get_features_ctx *get_feat_ctx,
3094 struct ena_com_dev *ena_dev = adapter->ena_dev;
3095 struct net_device *netdev = adapter->netdev;
3096 struct ena_llq_configurations llq_config;
3097 struct device *dev = &pdev->dev;
3098 bool readless_supported;
3103 rc = ena_com_mmio_reg_read_request_init(ena_dev);
3105 dev_err(dev, "Failed to init mmio read less\n");
3109 /* The PCIe configuration space revision id indicates whether mmio register reads are disabled */
3112 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
3113 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
3115 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
3117 dev_err(dev, "Can not reset device\n");
3118 goto err_mmio_read_less;
3121 rc = ena_com_validate_version(ena_dev);
3123 dev_err(dev, "Device version is too low\n");
3124 goto err_mmio_read_less;
3127 dma_width = ena_com_get_dma_width(ena_dev);
3128 if (dma_width < 0) {
3129 dev_err(dev, "Invalid dma width value %d", dma_width);
3131 goto err_mmio_read_less;
3134 rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
3136 dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
3137 goto err_mmio_read_less;
3140 /* ENA admin level init */
3141 rc = ena_com_admin_init(ena_dev, &aenq_handlers);
3144 "Can not initialize ena admin queue with device\n");
3145 goto err_mmio_read_less;
3148 /* To enable the msix interrupts the driver needs to know the number
3149 * of queues. So the driver uses polling mode to retrieve this information.
3152 ena_com_set_admin_polling_mode(ena_dev, true);
3154 ena_config_host_info(ena_dev, pdev);
3156 /* Get Device Attributes */
3157 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
3159 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
3160 goto err_admin_init;
3163 /* Try to turn on all the available aenq groups */
3164 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
3165 BIT(ENA_ADMIN_FATAL_ERROR) |
3166 BIT(ENA_ADMIN_WARNING) |
3167 BIT(ENA_ADMIN_NOTIFICATION) |
3168 BIT(ENA_ADMIN_KEEP_ALIVE);
3170 aenq_groups &= get_feat_ctx->aenq.supported_groups;
3172 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
3174 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
3175 goto err_admin_init;
3178 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
3180 set_default_llq_configurations(adapter, &llq_config, &get_feat_ctx->llq);
3182 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
3185 netdev_err(netdev, "Cannot set queues placement policy rc= %d\n", rc);
3186 goto err_admin_init;
3189 rc = ena_calc_io_queue_size(adapter, get_feat_ctx);
3191 goto err_admin_init;
3196 ena_com_abort_admin_commands(ena_dev);
3197 ena_com_wait_for_abort_completion(ena_dev);
3198 ena_com_delete_host_info(ena_dev);
3199 ena_com_admin_destroy(ena_dev);
3201 ena_com_mmio_reg_read_request_destroy(ena_dev);
3206 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
3208 struct ena_com_dev *ena_dev = adapter->ena_dev;
3209 struct device *dev = &adapter->pdev->dev;
3212 rc = ena_enable_msix(adapter);
3214 dev_err(dev, "Can not reserve msix vectors\n");
3218 ena_setup_mgmnt_intr(adapter);
3220 rc = ena_request_mgmnt_irq(adapter);
3222 dev_err(dev, "Can not setup management interrupts\n");
3223 goto err_disable_msix;
3226 ena_com_set_admin_polling_mode(ena_dev, false);
3228 ena_com_admin_aenq_enable(ena_dev);
3233 ena_disable_msix(adapter);
3238 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
3240 struct net_device *netdev = adapter->netdev;
3241 struct ena_com_dev *ena_dev = adapter->ena_dev;
3244 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3247 netif_carrier_off(netdev);
3249 del_timer_sync(&adapter->timer_service);
3251 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
3252 adapter->dev_up_before_reset = dev_up;
3254 ena_com_set_admin_running_state(ena_dev, false);
3259 /* Stop the device from sending AENQ events (in case the reset flag is set
3260 * and the device is up, ena_down() has already reset the device).
3262 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
3263 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3265 ena_free_mgmnt_irq(adapter);
3267 ena_disable_msix(adapter);
3269 ena_com_abort_admin_commands(ena_dev);
3271 ena_com_wait_for_abort_completion(ena_dev);
3273 ena_com_admin_destroy(ena_dev);
3275 ena_com_mmio_reg_read_request_destroy(ena_dev);
3277 /* return reset reason to default value */
3278 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3280 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3281 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3284 static int ena_restore_device(struct ena_adapter *adapter)
3286 struct ena_com_dev_get_features_ctx get_feat_ctx;
3287 struct ena_com_dev *ena_dev = adapter->ena_dev;
3288 struct pci_dev *pdev = adapter->pdev;
3289 struct ena_ring *txr;
3293 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3294 rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, &wd_state);
3296 dev_err(&pdev->dev, "Can not initialize device\n");
3299 adapter->wd_state = wd_state;
3301 count = adapter->xdp_num_queues + adapter->num_io_queues;
3302 for (i = 0 ; i < count; i++) {
3303 txr = &adapter->tx_ring[i];
3304 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
3305 txr->tx_max_header_size = ena_dev->tx_max_header_size;
3308 rc = ena_device_validate_params(adapter, &get_feat_ctx);
3310 dev_err(&pdev->dev, "Validation of device parameters failed\n");
3311 goto err_device_destroy;
3314 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3316 dev_err(&pdev->dev, "Enable MSI-X failed\n");
3317 goto err_device_destroy;
3319 /* If the interface was up before the reset, bring it up */
3320 if (adapter->dev_up_before_reset) {
3321 rc = ena_up(adapter);
3323 dev_err(&pdev->dev, "Failed to create I/O queues\n");
3324 goto err_disable_msix;
3328 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3330 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3331 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
3332 netif_carrier_on(adapter->netdev);
3334 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3335 adapter->last_keep_alive_jiffies = jiffies;
3339 ena_free_mgmnt_irq(adapter);
3340 ena_disable_msix(adapter);
3342 ena_com_abort_admin_commands(ena_dev);
3343 ena_com_wait_for_abort_completion(ena_dev);
3344 ena_com_admin_destroy(ena_dev);
3345 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
3346 ena_com_mmio_reg_read_request_destroy(ena_dev);
3348 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3349 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3351 "Reset attempt failed. Can not reset the device\n");
3356 static void ena_fw_reset_device(struct work_struct *work)
3358 struct ena_adapter *adapter =
3359 container_of(work, struct ena_adapter, reset_task);
3363 if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3364 ena_destroy_device(adapter, false);
3365 ena_restore_device(adapter);
3367 dev_err(&adapter->pdev->dev, "Device reset completed successfully\n");
3373 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3374 struct ena_ring *rx_ring)
3376 struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi);
3378 if (likely(READ_ONCE(ena_napi->first_interrupt)))
3381 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3384 rx_ring->no_interrupt_event_cnt++;
3386 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3387 netif_err(adapter, rx_err, adapter->netdev,
3388 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
3391 ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
3398 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3399 struct ena_ring *tx_ring)
3401 struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi);
3402 enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
3403 unsigned int time_since_last_napi;
3404 unsigned int missing_tx_comp_to;
3405 bool is_tx_comp_time_expired;
3406 struct ena_tx_buffer *tx_buf;
3407 unsigned long last_jiffies;
3412 missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
3414 for (i = 0; i < tx_ring->ring_size; i++) {
3415 tx_buf = &tx_ring->tx_buffer_info[i];
3416 last_jiffies = tx_buf->last_jiffies;
3418 if (last_jiffies == 0)
3419 /* no pending Tx at this location */
3422 is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies +
3423 2 * adapter->missing_tx_completion_to);
3425 if (unlikely(!READ_ONCE(ena_napi->first_interrupt) && is_tx_comp_time_expired)) {
3426 /* If the interrupt is still not received after the grace
3427 * period, we schedule a reset
3429 netif_err(adapter, tx_err, adapter->netdev,
3430 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
3432 ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
3436 is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies +
3437 adapter->missing_tx_completion_to);
3439 if (unlikely(is_tx_comp_time_expired)) {
3440 time_since_last_napi =
3441 jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
3442 napi_scheduled = !!(ena_napi->napi.state & NAPIF_STATE_SCHED);
3444 if (missing_tx_comp_to < time_since_last_napi && napi_scheduled) {
3445 /* We suspect napi isn't being called because the
3446 * bottom half is not running. Require a bigger
3447 * timeout for these cases
3449 if (!time_is_before_jiffies(last_jiffies +
3450 2 * adapter->missing_tx_completion_to))
3453 reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
3458 if (tx_buf->print_once)
3461 netif_notice(adapter, tx_err, adapter->netdev,
3462 "TX hasn't completed, qid %d, index %d. %u usecs from last napi execution, napi scheduled: %d\n",
3463 tx_ring->qid, i, time_since_last_napi, napi_scheduled);
3465 tx_buf->print_once = 1;
3469 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
3470 netif_err(adapter, tx_err, adapter->netdev,
3471 "Lost TX completions are above the threshold (%d > %d). Completion transmission timeout: %u.\n",
3473 adapter->missing_tx_completion_threshold,
3474 missing_tx_comp_to);
3475 netif_err(adapter, tx_err, adapter->netdev,
3476 "Resetting the device\n");
3478 ena_reset_device(adapter, reset_reason);
3482 ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx,
3488 static void check_for_missing_completions(struct ena_adapter *adapter)
3490 struct ena_ring *tx_ring;
3491 struct ena_ring *rx_ring;
3492 int qid, budget, rc;
3495 io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
3497 /* Make sure the device isn't being brought down or reset by another context */
3500 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3503 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3506 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
3509 budget = min_t(u32, io_queue_count, ENA_MONITORED_TX_QUEUES);
3511 qid = adapter->last_monitored_tx_qid;
3514 qid = (qid + 1) % io_queue_count;
3516 tx_ring = &adapter->tx_ring[qid];
3517 rx_ring = &adapter->rx_ring[qid];
3519 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3523 rc = !ENA_IS_XDP_INDEX(adapter, qid) ?
3524 check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
3531 adapter->last_monitored_tx_qid = qid;
3534 /* trigger napi schedule after 2 consecutive detections */
3535 #define EMPTY_RX_REFILL 2
3536 /* For the rare case where the device runs out of Rx descriptors and the
3537 * napi handler failed to refill new Rx descriptors (due to a lack of memory,
3539 * for example). This case will lead to a deadlock:
3540 * The device won't send interrupts since all the new Rx packets will be dropped,
3541 * and the napi handler won't allocate new Rx descriptors, so the device won't be
3542 * able to deliver new Rx packets.
3544 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
3545 * It is recommended to have at least 512MB, with a minimum of 128MB for
3546 * constrained environments.
3548 * When such a situation is detected - reschedule napi.
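 *
 * As a minimal illustration (the value is an assumption, not a driver
 * requirement), the watermark can be raised on a constrained host with:
 *   sysctl -w vm.min_free_kbytes=131072   (i.e. 128MB, expressed in kilobytes)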
3550 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
3552 struct ena_ring *rx_ring;
3553 int i, refill_required;
3555 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3558 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3561 for (i = 0; i < adapter->num_io_queues; i++) {
3562 rx_ring = &adapter->rx_ring[i];
3564 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
3565 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3566 rx_ring->empty_rx_queue++;
3568 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3569 ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1,
3572 netif_err(adapter, drv, adapter->netdev,
3573 "Trigger refill for ring %d\n", i);
3575 napi_schedule(rx_ring->napi);
3576 rx_ring->empty_rx_queue = 0;
3579 rx_ring->empty_rx_queue = 0;
3584 /* Check for keep alive expiration */
3585 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
3587 unsigned long keep_alive_expired;
3589 if (!adapter->wd_state)
3592 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3595 keep_alive_expired = adapter->last_keep_alive_jiffies +
3596 adapter->keep_alive_timeout;
3597 if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
3598 netif_err(adapter, drv, adapter->netdev,
3599 "Keep alive watchdog timeout.\n");
3600 ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
3602 ena_reset_device(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
3606 static void check_for_admin_com_state(struct ena_adapter *adapter)
3608 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
3609 netif_err(adapter, drv, adapter->netdev,
3610 "ENA admin queue is not in running state!\n");
3611 ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
3613 ena_reset_device(adapter, ENA_REGS_RESET_ADMIN_TO);
3617 static void ena_update_hints(struct ena_adapter *adapter,
3618 struct ena_admin_ena_hw_hints *hints)
3620 struct net_device *netdev = adapter->netdev;
3622 if (hints->admin_completion_tx_timeout)
3623 adapter->ena_dev->admin_queue.completion_timeout =
3624 hints->admin_completion_tx_timeout * 1000;
3626 if (hints->mmio_read_timeout)
3627 /* convert to usec */
3628 adapter->ena_dev->mmio_read.reg_read_to =
3629 hints->mmio_read_timeout * 1000;
3631 if (hints->missed_tx_completion_count_threshold_to_reset)
3632 adapter->missing_tx_completion_threshold =
3633 hints->missed_tx_completion_count_threshold_to_reset;
3635 if (hints->missing_tx_completion_timeout) {
3636 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3637 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
3639 adapter->missing_tx_completion_to =
3640 msecs_to_jiffies(hints->missing_tx_completion_timeout);
3643 if (hints->netdev_wd_timeout)
3644 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
3646 if (hints->driver_watchdog_timeout) {
3647 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3648 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3650 adapter->keep_alive_timeout =
3651 msecs_to_jiffies(hints->driver_watchdog_timeout);
3655 static void ena_update_host_info(struct ena_admin_host_info *host_info,
3656 struct net_device *netdev)
3658 host_info->supported_network_features[0] =
3659 netdev->features & GENMASK_ULL(31, 0);
3660 host_info->supported_network_features[1] =
3661 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
3664 static void ena_timer_service(struct timer_list *t)
3666 struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
3667 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
3668 struct ena_admin_host_info *host_info =
3669 adapter->ena_dev->host_attr.host_info;
3671 check_for_missing_keep_alive(adapter);
3673 check_for_admin_com_state(adapter);
3675 check_for_missing_completions(adapter);
3677 check_for_empty_rx_ring(adapter);
3680 ena_dump_stats_to_buf(adapter, debug_area);
3683 ena_update_host_info(host_info, adapter->netdev);
3685 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3686 netif_err(adapter, drv, adapter->netdev,
3687 "Trigger reset is on\n");
3688 ena_dump_stats_to_dmesg(adapter);
3689 queue_work(ena_wq, &adapter->reset_task);
3693 /* Reset the timer */
3694 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
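/* ena_calc_max_io_queue_num() below clamps the IO queue count to the most
 * restrictive of: the number of online CPUs, the device's Rx/Tx SQ and CQ
 * limits (the LLQ limit is used for Tx SQs in LLQ mode), and the available
 * MSI-X vectors minus the one reserved for management. For example (numbers
 * are illustrative only), a 16-CPU host whose device exposes 32 queues but
 * only 9 MSI-X vectors ends up with min(16, 32, 9 - 1) = 8 IO queues.
 */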
3697 static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
3698 struct ena_com_dev *ena_dev,
3699 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3701 u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
3703 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
3704 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
3705 &get_feat_ctx->max_queue_ext.max_queue_ext;
3706 io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
3707 max_queue_ext->max_rx_cq_num);
3709 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
3710 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
3712 struct ena_admin_queue_feature_desc *max_queues =
3713 &get_feat_ctx->max_queues;
3714 io_tx_sq_num = max_queues->max_sq_num;
3715 io_tx_cq_num = max_queues->max_cq_num;
3716 io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
3719 /* In case of LLQ use the llq fields for the tx SQ/CQ */
3720 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3721 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
3723 max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
3724 max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
3725 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
3726 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
3727 /* 1 IRQ for mgmnt and 1 IRQ for each IO queue (shared by its Tx/Rx pair) */
3728 max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
3730 return max_num_io_queues;
3733 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
3734 struct net_device *netdev)
3736 netdev_features_t dev_features = 0;
3738 /* Set offload features */
3739 if (feat->offload.tx &
3740 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3741 dev_features |= NETIF_F_IP_CSUM;
3743 if (feat->offload.tx &
3744 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
3745 dev_features |= NETIF_F_IPV6_CSUM;
3747 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
3748 dev_features |= NETIF_F_TSO;
3750 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
3751 dev_features |= NETIF_F_TSO6;
3753 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
3754 dev_features |= NETIF_F_TSO_ECN;
3756 if (feat->offload.rx_supported &
3757 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
3758 dev_features |= NETIF_F_RXCSUM;
3760 if (feat->offload.rx_supported &
3761 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
3762 dev_features |= NETIF_F_RXCSUM;
3770 netdev->hw_features |= netdev->features;
3771 netdev->vlan_features |= netdev->features;
3774 static void ena_set_conf_feat_params(struct ena_adapter *adapter,
3775 struct ena_com_dev_get_features_ctx *feat)
3777 struct net_device *netdev = adapter->netdev;
3779 /* Copy mac address */
3780 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
3781 eth_hw_addr_random(netdev);
3782 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
3784 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
3785 eth_hw_addr_set(netdev, adapter->mac_addr);
3788 /* Set offload features */
3789 ena_set_dev_offloads(feat, netdev);
3791 adapter->max_mtu = feat->dev_attr.max_mtu;
3792 netdev->max_mtu = adapter->max_mtu;
3793 netdev->min_mtu = ENA_MIN_MTU;
3796 static int ena_rss_init_default(struct ena_adapter *adapter)
3798 struct ena_com_dev *ena_dev = adapter->ena_dev;
3799 struct device *dev = &adapter->pdev->dev;
3803 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
3805 dev_err(dev, "Cannot init indirect table\n");
3809 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
3810 val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
3811 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
3812 ENA_IO_RXQ_IDX(val));
3814 dev_err(dev, "Cannot fill indirect table\n");
3815 goto err_fill_indir;
3819 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, ENA_HASH_KEY_SIZE,
3821 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3822 dev_err(dev, "Cannot fill hash function\n");
3823 goto err_fill_indir;
3826 rc = ena_com_set_default_hash_ctrl(ena_dev);
3827 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3828 dev_err(dev, "Cannot fill hash control\n");
3829 goto err_fill_indir;
3835 ena_com_rss_destroy(ena_dev);
3841 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3843 int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3845 pci_release_selected_regions(pdev, release_bars);
3848 /* ena_probe - Device Initialization Routine
3849 * @pdev: PCI device information struct
3850 * @ent: entry in ena_pci_tbl
3852 * Returns 0 on success, negative on failure
3854 * ena_probe initializes an adapter identified by a pci_dev structure.
3855 * The OS initialization, configuration of the adapter private structure,
3856 * and a hardware reset occur.
3858 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3860 struct ena_com_dev_get_features_ctx get_feat_ctx;
3861 struct ena_com_dev *ena_dev = NULL;
3862 struct ena_adapter *adapter;
3863 struct net_device *netdev;
3864 static int adapters_found;
3865 u32 max_num_io_queues;
3869 dev_dbg(&pdev->dev, "%s\n", __func__);
3871 rc = pci_enable_device_mem(pdev);
3873 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
3877 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
3879 dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
3880 goto err_disable_device;
3883 pci_set_master(pdev);
3885 ena_dev = vzalloc(sizeof(*ena_dev));
3888 goto err_disable_device;
3891 bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3892 rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
3894 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
3896 goto err_free_ena_dev;
3899 ena_dev->reg_bar = devm_ioremap(&pdev->dev,
3900 pci_resource_start(pdev, ENA_REG_BAR),
3901 pci_resource_len(pdev, ENA_REG_BAR));
3902 if (!ena_dev->reg_bar) {
3903 dev_err(&pdev->dev, "Failed to remap regs bar\n");
3905 goto err_free_region;
3908 ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;
3910 ena_dev->dmadev = &pdev->dev;
3912 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), ENA_MAX_RINGS);
3914 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
3916 goto err_free_region;
3919 SET_NETDEV_DEV(netdev, &pdev->dev);
3920 adapter = netdev_priv(netdev);
3921 adapter->ena_dev = ena_dev;
3922 adapter->netdev = netdev;
3923 adapter->pdev = pdev;
3924 adapter->msg_enable = DEFAULT_MSG_ENABLE;
3926 ena_dev->net_device = netdev;
3928 pci_set_drvdata(pdev, adapter);
3930 rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
3932 dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n");
3933 goto err_netdev_destroy;
3936 rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
3938 dev_err(&pdev->dev, "ENA device init failed\n");
3941 goto err_netdev_destroy;
3944 /* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
3945 * Updated during device initialization with the real granularity
3947 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
3948 ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
3949 ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
3950 max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
3951 if (unlikely(!max_num_io_queues)) {
3953 goto err_device_destroy;
3956 ena_set_conf_feat_params(adapter, &get_feat_ctx);
3958 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3960 adapter->num_io_queues = max_num_io_queues;
3961 adapter->max_num_io_queues = max_num_io_queues;
3962 adapter->last_monitored_tx_qid = 0;
3964 adapter->xdp_first_ring = 0;
3965 adapter->xdp_num_queues = 0;
3967 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
3968 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3969 adapter->disable_meta_caching =
3970 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
3971 BIT(ENA_ADMIN_DISABLE_META_CACHING));
3973 adapter->wd_state = wd_state;
3975 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
3977 rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
3980 "Failed to query interrupt moderation feature\n");
3981 goto err_device_destroy;
3984 ena_init_io_rings(adapter,
3986 adapter->xdp_num_queues +
3987 adapter->num_io_queues);
3989 netdev->netdev_ops = &ena_netdev_ops;
3990 netdev->watchdog_timeo = TX_TIMEOUT;
3991 ena_set_ethtool_ops(netdev);
3993 netdev->priv_flags |= IFF_UNICAST_FLT;
3995 u64_stats_init(&adapter->syncp);
3997 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
4000 "Failed to enable and set the admin interrupts\n");
4001 goto err_worker_destroy;
4003 rc = ena_rss_init_default(adapter);
4004 if (rc && (rc != -EOPNOTSUPP)) {
4005 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
4009 ena_config_debug_area(adapter);
4011 if (ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
4012 netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
4013 NETDEV_XDP_ACT_REDIRECT;
4015 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
4017 netif_carrier_off(netdev);
4019 rc = register_netdev(netdev);
4021 dev_err(&pdev->dev, "Cannot register net device\n");
4025 INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
4027 adapter->last_keep_alive_jiffies = jiffies;
4028 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
4029 adapter->missing_tx_completion_to = TX_TIMEOUT;
4030 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
4032 ena_update_hints(adapter, &get_feat_ctx.hw_hints);
4034 timer_setup(&adapter->timer_service, ena_timer_service, 0);
4035 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
4037 dev_info(&pdev->dev,
4038 "%s found at mem %lx, mac addr %pM\n",
4039 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
4042 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
4049 ena_com_delete_debug_area(ena_dev);
4050 ena_com_rss_destroy(ena_dev);
4052 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
4053 /* stop submitting admin commands on a device that was reset */
4054 ena_com_set_admin_running_state(ena_dev, false);
4055 ena_free_mgmnt_irq(adapter);
4056 ena_disable_msix(adapter);
4058 del_timer(&adapter->timer_service);
4060 ena_com_delete_host_info(ena_dev);
4061 ena_com_admin_destroy(ena_dev);
4063 free_netdev(netdev);
4065 ena_release_bars(ena_dev, pdev);
4069 pci_disable_device(pdev);
4073 /*****************************************************************************/
4075 /* __ena_shutoff - Helper used in both PCI remove/shutdown routines
4076 * @pdev: PCI device information struct
4077 * @shutdown: Is it a shutdown operation? If false, it is a removal.
4079 * __ena_shutoff is a helper routine that does the real work on the shutdown and
4080 * removal paths; the difference between those paths is whether the netdevice is
4081 * detached or unregistered.
4083 static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
4085 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4086 struct ena_com_dev *ena_dev;
4087 struct net_device *netdev;
4089 ena_dev = adapter->ena_dev;
4090 netdev = adapter->netdev;
4092 #ifdef CONFIG_RFS_ACCEL
4093 if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
4094 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
4095 netdev->rx_cpu_rmap = NULL;
4098 #endif /* CONFIG_RFS_ACCEL */
4099 /* Make sure timer and reset routine won't be called after
4100 * freeing device resources.
4102 del_timer_sync(&adapter->timer_service);
4103 cancel_work_sync(&adapter->reset_task);
4105 rtnl_lock(); /* lock released inside the below if-else block */
4106 adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
4107 ena_destroy_device(adapter, true);
4110 netif_device_detach(netdev);
4115 unregister_netdev(netdev);
4116 free_netdev(netdev);
4119 ena_com_rss_destroy(ena_dev);
4121 ena_com_delete_debug_area(ena_dev);
4123 ena_com_delete_host_info(ena_dev);
4125 ena_release_bars(ena_dev, pdev);
4127 pci_disable_device(pdev);
4132 /* ena_remove - Device Removal Routine
4133 * @pdev: PCI device information struct
4135 * ena_remove is called by the PCI subsystem to alert the driver
4136 * that it should release a PCI device.
4139 static void ena_remove(struct pci_dev *pdev)
4141 __ena_shutoff(pdev, false);
4144 /* ena_shutdown - Device Shutdown Routine
4145 * @pdev: PCI device information struct
4147 * ena_shutdown is called by the PCI subsystem to alert the driver that
4148 * a shutdown/reboot (or kexec) is happening and the device must be disabled.
4151 static void ena_shutdown(struct pci_dev *pdev)
4153 __ena_shutoff(pdev, true);
4156 /* ena_suspend - PM suspend callback
4157 * @dev_d: Device information struct
4159 static int __maybe_unused ena_suspend(struct device *dev_d)
4161 struct pci_dev *pdev = to_pci_dev(dev_d);
4162 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4164 ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp);
4167 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
4169 "Ignoring device reset request as the device is being suspended\n");
4170 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
4172 ena_destroy_device(adapter, true);
4177 /* ena_resume - PM resume callback
4178 * @dev_d: Device information struct
4180 static int __maybe_unused ena_resume(struct device *dev_d)
4182 struct ena_adapter *adapter = dev_get_drvdata(dev_d);
4185 ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp);
4188 rc = ena_restore_device(adapter);
4193 static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume);
4195 static struct pci_driver ena_pci_driver = {
4196 .name = DRV_MODULE_NAME,
4197 .id_table = ena_pci_tbl,
4199 .remove = ena_remove,
4200 .shutdown = ena_shutdown,
4201 .driver.pm = &ena_pm_ops,
4202 .sriov_configure = pci_sriov_configure_simple,
4205 static int __init ena_init(void)
4209 ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
4211 pr_err("Failed to create workqueue\n");
4215 ret = pci_register_driver(&ena_pci_driver);
4217 destroy_workqueue(ena_wq);
4222 static void __exit ena_cleanup(void)
4224 pci_unregister_driver(&ena_pci_driver);
4227 destroy_workqueue(ena_wq);
4232 /******************************************************************************
4233 ******************************** AENQ Handlers *******************************
4234 *****************************************************************************/
4235 /* ena_update_on_link_change:
4236 * Notify the network interface about the change in link status
4238 static void ena_update_on_link_change(void *adapter_data,
4239 struct ena_admin_aenq_entry *aenq_e)
4241 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4242 struct ena_admin_aenq_link_change_desc *aenq_desc =
4243 (struct ena_admin_aenq_link_change_desc *)aenq_e;
4244 int status = aenq_desc->flags &
4245 ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
4248 netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
4249 set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
4250 if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
4251 netif_carrier_on(adapter->netdev);
4253 clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
4254 netif_carrier_off(adapter->netdev);
4258 static void ena_keep_alive_wd(void *adapter_data,
4259 struct ena_admin_aenq_entry *aenq_e)
4261 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4262 struct ena_admin_aenq_keep_alive_desc *desc;
4266 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
4267 adapter->last_keep_alive_jiffies = jiffies;
4269 rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
4270 tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;
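/* The device reports each drop counter as two 32-bit halves; for example,
 * rx_drops_high = 0x1 with rx_drops_low = 0x2 assembles to 0x100000002.
 */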
4272 u64_stats_update_begin(&adapter->syncp);
4273 /* These stats are accumulated by the device, so the counters indicate
4274 * all drops since last reset.
4276 adapter->dev_stats.rx_drops = rx_drops;
4277 adapter->dev_stats.tx_drops = tx_drops;
4278 u64_stats_update_end(&adapter->syncp);
4281 static void ena_notification(void *adapter_data,
4282 struct ena_admin_aenq_entry *aenq_e)
4284 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4285 struct ena_admin_ena_hw_hints *hints;
4287 WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
4288 "Invalid group(%x) expected %x\n",
4289 aenq_e->aenq_common_desc.group,
4290 ENA_ADMIN_NOTIFICATION);
4292 switch (aenq_e->aenq_common_desc.syndrome) {
4293 case ENA_ADMIN_UPDATE_HINTS:
4294 hints = (struct ena_admin_ena_hw_hints *)
4295 (&aenq_e->inline_data_w4);
4296 ena_update_hints(adapter, hints);
4299 netif_err(adapter, drv, adapter->netdev,
4300 "Invalid aenq notification link state %d\n",
4301 aenq_e->aenq_common_desc.syndrome);
4305 /* This handler will be called for an unknown event group or unimplemented handlers */
4306 static void unimplemented_aenq_handler(void *data,
4307 struct ena_admin_aenq_entry *aenq_e)
4309 struct ena_adapter *adapter = (struct ena_adapter *)data;
4311 netif_err(adapter, drv, adapter->netdev,
4312 "Unknown event was received or event with unimplemented handler\n");
4315 static struct ena_aenq_handlers aenq_handlers = {
4317 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
4318 [ENA_ADMIN_NOTIFICATION] = ena_notification,
4319 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
4321 .unimplemented_handler = unimplemented_aenq_handler
4324 module_init(ena_init);
4325 module_exit(ena_cleanup);