1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 #ifdef CONFIG_RFS_ACCEL
9 #include <linux/cpu_rmap.h>
10 #endif /* CONFIG_RFS_ACCEL */
11 #include <linux/ethtool.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/numa.h>
15 #include <linux/pci.h>
16 #include <linux/utsname.h>
17 #include <linux/version.h>
18 #include <linux/vmalloc.h>
21 #include "ena_netdev.h"
22 #include <linux/bpf_trace.h>
23 #include "ena_pci_id_tbl.h"
25 MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
26 MODULE_DESCRIPTION(DEVICE_NAME);
27 MODULE_LICENSE("GPL");
29 /* Time in jiffies before concluding the transmitter is hung. */
30 #define TX_TIMEOUT (5 * HZ)
32 #define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())
34 #define ENA_NAPI_BUDGET 64
36 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
37 NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
38 static int debug = -1;
39 module_param(debug, int, 0);
40 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
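/* Usage sketch for the "debug" parameter (hedged; assumes the probe path feeds
 * it to netif_msg_init() together with DEFAULT_MSG_ENABLE): -1 keeps the
 * default mask above, 0 silences all netif messages, and a value N enables the
 * lowest N NETIF_MSG_* bits, e.g.:
 *
 *	# modprobe ena debug=5      (roughly DRV|PROBE|LINK|TIMER|IFDOWN)
 *	# modprobe ena debug=16     (all message classes)
 */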
42 static struct ena_aenq_handlers aenq_handlers;
44 static struct workqueue_struct *ena_wq;
46 MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
48 static int ena_rss_init_default(struct ena_adapter *adapter);
49 static void check_for_admin_com_state(struct ena_adapter *adapter);
50 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
51 static int ena_restore_device(struct ena_adapter *adapter);
53 static void ena_init_io_rings(struct ena_adapter *adapter,
54 int first_index, int count);
55 static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
57 static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
59 static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
60 static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
63 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
64 static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
65 static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
66 static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
67 static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
68 static void ena_napi_disable_in_range(struct ena_adapter *adapter,
69 int first_index, int count);
70 static void ena_napi_enable_in_range(struct ena_adapter *adapter,
71 int first_index, int count);
72 static int ena_up(struct ena_adapter *adapter);
73 static void ena_down(struct ena_adapter *adapter);
74 static void ena_unmask_interrupt(struct ena_ring *tx_ring,
75 struct ena_ring *rx_ring);
76 static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
77 struct ena_ring *rx_ring);
78 static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
79 struct ena_tx_buffer *tx_info);
80 static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
81 int first_index, int count);
83 /* Increase a stat by cnt while holding the syncp seqlock on 32-bit machines */
84 static void ena_increase_stat(u64 *statp, u64 cnt,
85 struct u64_stats_sync *syncp)
87 u64_stats_update_begin(syncp);
89 u64_stats_update_end(syncp);
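/* The u64_stats_sync seqcount above only does real work on 32-bit machines,
 * where a 64-bit counter update is not atomic; on 64-bit builds the begin/end
 * calls compile away. A hedged reader-side sketch (illustrative, mirroring the
 * tx_timeout stat used in this file):
 *
 *	unsigned int start;
 *	u64 timeouts;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&adapter->syncp);
 *		timeouts = adapter->dev_stats.tx_timeout;
 *	} while (u64_stats_fetch_retry(&adapter->syncp, start));
 */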
92 static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
94 struct ena_adapter *adapter = netdev_priv(dev);
96 /* Change the state of the device to trigger reset
97 * Check that a reset is not already in progress or triggered
100 if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
103 adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
104 ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
106 netif_err(adapter, tx_err, dev, "Transmit timed out\n");
109 static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
113 for (i = 0; i < adapter->num_io_queues; i++)
114 adapter->rx_ring[i].mtu = mtu;
117 static int ena_change_mtu(struct net_device *dev, int new_mtu)
119 struct ena_adapter *adapter = netdev_priv(dev);
122 ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
124 netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
125 update_rx_ring_mtu(adapter, new_mtu);
128 netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
135 static int ena_xmit_common(struct net_device *dev,
136 struct ena_ring *ring,
137 struct ena_tx_buffer *tx_info,
138 struct ena_com_tx_ctx *ena_tx_ctx,
142 struct ena_adapter *adapter = netdev_priv(dev);
145 if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
147 netif_dbg(adapter, tx_queued, dev,
148 "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
150 ena_com_write_sq_doorbell(ring->ena_com_io_sq);
153 /* prepare the packet's descriptors for the DMA engine */
154 rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
157 /* In case there isn't enough space in the queue for the packet,
158 * we simply drop it. All other failure reasons of
159 * ena_com_prepare_tx() are fatal and therefore require a device reset.
162 netif_err(adapter, tx_queued, dev,
163 "Failed to prepare tx bufs\n");
164 ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
167 adapter->reset_reason =
168 ENA_REGS_RESET_DRIVER_INVALID_STATE;
169 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
174 u64_stats_update_begin(&ring->syncp);
175 ring->tx_stats.cnt++;
176 ring->tx_stats.bytes += bytes;
177 u64_stats_update_end(&ring->syncp);
179 tx_info->tx_descs = nb_hw_desc;
180 tx_info->last_jiffies = jiffies;
181 tx_info->print_once = 0;
183 ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
188 /* This is the XDP napi callback. XDP queues use a separate napi callback
191 static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
193 struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
194 u32 xdp_work_done, xdp_budget;
195 struct ena_ring *xdp_ring;
196 int napi_comp_call = 0;
199 xdp_ring = ena_napi->xdp_ring;
200 xdp_ring->first_interrupt = ena_napi->first_interrupt;
204 if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
205 test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
206 napi_complete_done(napi, 0);
210 xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);
212 /* If the device is about to reset or is down, avoid unmasking
213 * the interrupt and return 0 so NAPI won't reschedule
215 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
216 napi_complete_done(napi, 0);
218 } else if (xdp_budget > xdp_work_done) {
220 if (napi_complete_done(napi, xdp_work_done))
221 ena_unmask_interrupt(xdp_ring, NULL);
222 ena_update_ring_numa_node(xdp_ring, NULL);
228 u64_stats_update_begin(&xdp_ring->syncp);
229 xdp_ring->tx_stats.napi_comp += napi_comp_call;
230 xdp_ring->tx_stats.tx_poll++;
231 u64_stats_update_end(&xdp_ring->syncp);
236 static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
237 struct ena_tx_buffer *tx_info,
238 struct xdp_frame *xdpf,
242 struct ena_adapter *adapter = xdp_ring->adapter;
243 struct ena_com_buf *ena_buf;
247 tx_info->xdpf = xdpf;
248 size = tx_info->xdpf->len;
249 ena_buf = tx_info->bufs;
251 /* llq push buffer */
252 *push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
253 *push_hdr = tx_info->xdpf->data;
255 if (size - *push_len > 0) {
256 dma = dma_map_single(xdp_ring->dev,
257 *push_hdr + *push_len,
260 if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
261 goto error_report_dma_error;
263 tx_info->map_linear_data = 1;
264 tx_info->num_of_bufs = 1;
267 ena_buf->paddr = dma;
272 error_report_dma_error:
273 ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
275 netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
277 xdp_return_frame_rx_napi(tx_info->xdpf);
278 tx_info->xdpf = NULL;
279 tx_info->num_of_bufs = 0;
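/* Buffer layout produced above (sketch): with LLQ, up to tx_max_header_size
 * bytes of the frame are pushed inline through push_hdr/push_len, and only the
 * remainder, if any, is DMA-mapped and described by ena_buf. A frame that fits
 * entirely in the push header keeps num_of_bufs at 0 and needs no mapping:
 *
 *	push_len = min(size, tx_max_header_size);
 *	push_hdr = xdpf->data;                    // inline (LLQ) part
 *	if (size > push_len)
 *		ena_buf = { .paddr = dma, .len = size - push_len };
 */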
284 static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
285 struct net_device *dev,
286 struct xdp_frame *xdpf,
289 struct ena_com_tx_ctx ena_tx_ctx = {};
290 struct ena_tx_buffer *tx_info;
291 u16 next_to_use, req_id;
296 next_to_use = xdp_ring->next_to_use;
297 req_id = xdp_ring->free_ids[next_to_use];
298 tx_info = &xdp_ring->tx_buffer_info[req_id];
299 tx_info->num_of_bufs = 0;
301 rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
305 ena_tx_ctx.ena_bufs = tx_info->bufs;
306 ena_tx_ctx.push_header = push_hdr;
307 ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
308 ena_tx_ctx.req_id = req_id;
309 ena_tx_ctx.header_len = push_len;
311 rc = ena_xmit_common(dev,
318 goto error_unmap_dma;
319 /* trigger the dma engine. ena_com_write_sq_doorbell()
322 if (flags & XDP_XMIT_FLUSH) {
323 ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
324 ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
331 ena_unmap_tx_buff(xdp_ring, tx_info);
332 tx_info->xdpf = NULL;
336 static int ena_xdp_xmit(struct net_device *dev, int n,
337 struct xdp_frame **frames, u32 flags)
339 struct ena_adapter *adapter = netdev_priv(dev);
340 struct ena_ring *xdp_ring;
341 int qid, i, nxmit = 0;
343 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
346 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
349 /* We assume that all rings have the same XDP program */
350 if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
353 qid = smp_processor_id() % adapter->xdp_num_queues;
354 qid += adapter->xdp_first_ring;
355 xdp_ring = &adapter->tx_ring[qid];
357 /* Other CPU ids might try to send through this queue */
358 spin_lock(&xdp_ring->xdp_tx_lock);
360 for (i = 0; i < n; i++) {
361 if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0))
366 /* Ring the doorbell to make the device aware of the packets */
367 if (flags & XDP_XMIT_FLUSH) {
368 ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
369 ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
373 spin_unlock(&xdp_ring->xdp_tx_lock);
375 /* Return number of packets sent */
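/* ndo_xdp_xmit() contract relied on here (hedged summary): the return value is
 * the number of frames actually queued, and the XDP core frees any frames past
 * that count, which is why the loop above stops at the first
 * ena_xdp_xmit_frame() failure and only nxmit is reported. Caller-side sketch:
 *
 *	sent = ndo_xdp_xmit(dev, n, frames, XDP_XMIT_FLUSH);
 *	// frames[sent..n-1] stay owned by the caller and are returned there
 */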
379 static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
381 struct bpf_prog *xdp_prog;
382 struct ena_ring *xdp_ring;
383 u32 verdict = XDP_PASS;
384 struct xdp_frame *xdpf;
389 xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
394 verdict = bpf_prog_run_xdp(xdp_prog, xdp);
398 xdpf = xdp_convert_buff_to_frame(xdp);
399 if (unlikely(!xdpf)) {
400 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
401 xdp_stat = &rx_ring->rx_stats.xdp_aborted;
402 verdict = XDP_ABORTED;
406 /* Find xmit queue */
407 qid = rx_ring->qid + rx_ring->adapter->num_io_queues;
408 xdp_ring = &rx_ring->adapter->tx_ring[qid];
410 /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
411 spin_lock(&xdp_ring->xdp_tx_lock);
413 if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf,
415 xdp_return_frame(xdpf);
417 spin_unlock(&xdp_ring->xdp_tx_lock);
418 xdp_stat = &rx_ring->rx_stats.xdp_tx;
421 if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
422 xdp_stat = &rx_ring->rx_stats.xdp_redirect;
425 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
426 xdp_stat = &rx_ring->rx_stats.xdp_aborted;
427 verdict = XDP_ABORTED;
430 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
431 xdp_stat = &rx_ring->rx_stats.xdp_aborted;
434 xdp_stat = &rx_ring->rx_stats.xdp_drop;
437 xdp_stat = &rx_ring->rx_stats.xdp_pass;
440 bpf_warn_invalid_xdp_action(verdict);
441 xdp_stat = &rx_ring->rx_stats.xdp_invalid;
444 ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
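/* The verdicts handled above come straight from bpf_prog_run_xdp(). A minimal
 * XDP program this handler could face might look like the sketch below (a
 * separate object built with clang -target bpf; names are illustrative):
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("xdp")
 *	int xdp_pass_all(struct xdp_md *ctx)
 *	{
 *		return XDP_PASS;	// hand every frame to the regular stack
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */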
451 static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
453 adapter->xdp_first_ring = adapter->num_io_queues;
454 adapter->xdp_num_queues = adapter->num_io_queues;
456 ena_init_io_rings(adapter,
457 adapter->xdp_first_ring,
458 adapter->xdp_num_queues);
461 static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
465 rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
466 adapter->xdp_num_queues);
470 rc = ena_create_io_tx_queues_in_range(adapter,
471 adapter->xdp_first_ring,
472 adapter->xdp_num_queues);
479 ena_free_all_io_tx_resources(adapter);
484 /* Provides a way for both kernel and bpf-prog to know
485 * more about the RX-queue a given XDP frame arrived on.
487 static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
491 rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
494 netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
495 "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
500 rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
504 netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
505 "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
507 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
514 static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
516 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
517 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
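/* Registering MEM_TYPE_PAGE_SHARED above tells the XDP core that RX buffers
 * are plain driver-owned pages, so frames consumed by XDP_TX/XDP_REDIRECT come
 * back through xdp_return_frame() rather than a page_pool. The two helpers are
 * expected to pair per RX queue (sketch):
 *
 *	rc = ena_xdp_register_rxq_info(rx_ring);	// on program attach
 *	...
 *	ena_xdp_unregister_rxq_info(rx_ring);		// on detach / teardown
 */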
520 static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
521 struct bpf_prog *prog,
522 int first, int count)
524 struct ena_ring *rx_ring;
527 for (i = first; i < count; i++) {
528 rx_ring = &adapter->rx_ring[i];
529 xchg(&rx_ring->xdp_bpf_prog, prog);
531 ena_xdp_register_rxq_info(rx_ring);
532 rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
534 ena_xdp_unregister_rxq_info(rx_ring);
535 rx_ring->rx_headroom = 0;
540 static void ena_xdp_exchange_program(struct ena_adapter *adapter,
541 struct bpf_prog *prog)
543 struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
545 ena_xdp_exchange_program_rx_in_range(adapter,
548 adapter->num_io_queues);
551 bpf_prog_put(old_bpf_prog);
554 static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
559 was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
564 adapter->xdp_first_ring = 0;
565 adapter->xdp_num_queues = 0;
566 ena_xdp_exchange_program(adapter, NULL);
568 rc = ena_up(adapter);
575 static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
577 struct ena_adapter *adapter = netdev_priv(netdev);
578 struct bpf_prog *prog = bpf->prog;
579 struct bpf_prog *old_bpf_prog;
583 is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
584 rc = ena_xdp_allowed(adapter);
585 if (rc == ENA_XDP_ALLOWED) {
586 old_bpf_prog = adapter->xdp_bpf_prog;
589 ena_init_all_xdp_queues(adapter);
590 } else if (!old_bpf_prog) {
592 ena_init_all_xdp_queues(adapter);
594 ena_xdp_exchange_program(adapter, prog);
596 if (is_up && !old_bpf_prog) {
597 rc = ena_up(adapter);
601 } else if (old_bpf_prog) {
602 rc = ena_destroy_and_free_all_xdp_queues(adapter);
607 prev_mtu = netdev->max_mtu;
608 netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
611 netif_info(adapter, drv, adapter->netdev,
612 "XDP program is set, changing the max_mtu from %d to %d",
613 prev_mtu, netdev->max_mtu);
615 } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
616 netif_err(adapter, drv, adapter->netdev,
617 "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
618 netdev->mtu, ENA_XDP_MAX_MTU);
619 NL_SET_ERR_MSG_MOD(bpf->extack,
620 "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
622 } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
623 netif_err(adapter, drv, adapter->netdev,
624 "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
625 adapter->num_io_queues, adapter->max_num_io_queues);
626 NL_SET_ERR_MSG_MOD(bpf->extack,
627 "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
634 /* This is the main xdp callback, it's used by the kernel to set/unset the xdp
635 * program as well as to query the current xdp program id.
637 static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
639 switch (bpf->command) {
641 return ena_xdp_set(netdev, bpf);
648 static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
650 #ifdef CONFIG_RFS_ACCEL
654 adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
655 if (!adapter->netdev->rx_cpu_rmap)
657 for (i = 0; i < adapter->num_io_queues; i++) {
658 int irq_idx = ENA_IO_IRQ_IDX(i);
660 rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
661 pci_irq_vector(adapter->pdev, irq_idx));
663 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
664 adapter->netdev->rx_cpu_rmap = NULL;
668 #endif /* CONFIG_RFS_ACCEL */
672 static void ena_init_io_rings_common(struct ena_adapter *adapter,
673 struct ena_ring *ring, u16 qid)
676 ring->pdev = adapter->pdev;
677 ring->dev = &adapter->pdev->dev;
678 ring->netdev = adapter->netdev;
679 ring->napi = &adapter->ena_napi[qid].napi;
680 ring->adapter = adapter;
681 ring->ena_dev = adapter->ena_dev;
682 ring->per_napi_packets = 0;
684 ring->first_interrupt = false;
685 ring->no_interrupt_event_cnt = 0;
686 u64_stats_init(&ring->syncp);
689 static void ena_init_io_rings(struct ena_adapter *adapter,
690 int first_index, int count)
692 struct ena_com_dev *ena_dev;
693 struct ena_ring *txr, *rxr;
696 ena_dev = adapter->ena_dev;
698 for (i = first_index; i < first_index + count; i++) {
699 txr = &adapter->tx_ring[i];
700 rxr = &adapter->rx_ring[i];
702 /* TX common ring state */
703 ena_init_io_rings_common(adapter, txr, i);
705 /* TX specific ring state */
706 txr->ring_size = adapter->requested_tx_ring_size;
707 txr->tx_max_header_size = ena_dev->tx_max_header_size;
708 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
709 txr->sgl_size = adapter->max_tx_sgl_size;
710 txr->smoothed_interval =
711 ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
712 txr->disable_meta_caching = adapter->disable_meta_caching;
713 spin_lock_init(&txr->xdp_tx_lock);
715 /* Don't init RX queues for xdp queues */
716 if (!ENA_IS_XDP_INDEX(adapter, i)) {
717 /* RX common ring state */
718 ena_init_io_rings_common(adapter, rxr, i);
720 /* RX specific ring state */
721 rxr->ring_size = adapter->requested_rx_ring_size;
722 rxr->rx_copybreak = adapter->rx_copybreak;
723 rxr->sgl_size = adapter->max_rx_sgl_size;
724 rxr->smoothed_interval =
725 ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
726 rxr->empty_rx_queue = 0;
727 adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
732 /* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
733 * @adapter: network interface device structure
736 * Return 0 on success, negative on failure
738 static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
740 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
741 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
744 if (tx_ring->tx_buffer_info) {
745 netif_err(adapter, ifup,
746 adapter->netdev, "tx_buffer_info is not NULL");
750 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
751 node = cpu_to_node(ena_irq->cpu);
753 tx_ring->tx_buffer_info = vzalloc_node(size, node);
754 if (!tx_ring->tx_buffer_info) {
755 tx_ring->tx_buffer_info = vzalloc(size);
756 if (!tx_ring->tx_buffer_info)
757 goto err_tx_buffer_info;
760 size = sizeof(u16) * tx_ring->ring_size;
761 tx_ring->free_ids = vzalloc_node(size, node);
762 if (!tx_ring->free_ids) {
763 tx_ring->free_ids = vzalloc(size);
764 if (!tx_ring->free_ids)
765 goto err_tx_free_ids;
768 size = tx_ring->tx_max_header_size;
769 tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
770 if (!tx_ring->push_buf_intermediate_buf) {
771 tx_ring->push_buf_intermediate_buf = vzalloc(size);
772 if (!tx_ring->push_buf_intermediate_buf)
773 goto err_push_buf_intermediate_buf;
776 /* Req id ring for TX out of order completions */
777 for (i = 0; i < tx_ring->ring_size; i++)
778 tx_ring->free_ids[i] = i;
780 /* Reset tx statistics */
781 memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
783 tx_ring->next_to_use = 0;
784 tx_ring->next_to_clean = 0;
785 tx_ring->cpu = ena_irq->cpu;
788 err_push_buf_intermediate_buf:
789 vfree(tx_ring->free_ids);
790 tx_ring->free_ids = NULL;
792 vfree(tx_ring->tx_buffer_info);
793 tx_ring->tx_buffer_info = NULL;
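/* Allocation pattern used throughout this function (sketch): prefer the NUMA
 * node of the queue's IRQ and fall back to any node before failing, since a
 * remote-node buffer still beats aborting queue setup:
 *
 *	buf = vzalloc_node(size, node);
 *	if (!buf) {
 *		buf = vzalloc(size);		// any node
 *		if (!buf)
 *			return -ENOMEM;
 *	}
 */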
798 /* ena_free_tx_resources - Free I/O Tx Resources per Queue
799 * @adapter: network interface device structure
802 * Free all transmit software resources
804 static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
806 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
808 vfree(tx_ring->tx_buffer_info);
809 tx_ring->tx_buffer_info = NULL;
811 vfree(tx_ring->free_ids);
812 tx_ring->free_ids = NULL;
814 vfree(tx_ring->push_buf_intermediate_buf);
815 tx_ring->push_buf_intermediate_buf = NULL;
818 static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
824 for (i = first_index; i < first_index + count; i++) {
825 rc = ena_setup_tx_resources(adapter, i);
834 netif_err(adapter, ifup, adapter->netdev,
835 "Tx queue %d: allocation failed\n", i);
837 /* rewind the index freeing the rings as we go */
838 while (first_index < i--)
839 ena_free_tx_resources(adapter, i);
843 static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
844 int first_index, int count)
848 for (i = first_index; i < first_index + count; i++)
849 ena_free_tx_resources(adapter, i);
852 /* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
853 * @adapter: board private structure
855 * Free all transmit software resources
857 static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
859 ena_free_all_io_tx_resources_in_range(adapter,
861 adapter->xdp_num_queues +
862 adapter->num_io_queues);
865 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
866 * @adapter: network interface device structure
869 * Returns 0 on success, negative on failure
871 static int ena_setup_rx_resources(struct ena_adapter *adapter,
874 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
875 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
878 if (rx_ring->rx_buffer_info) {
879 netif_err(adapter, ifup, adapter->netdev,
880 "rx_buffer_info is not NULL");
884 /* alloc an extra element so that in the rx path
885 * we can always prefetch rx_info + 1
887 size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
888 node = cpu_to_node(ena_irq->cpu);
890 rx_ring->rx_buffer_info = vzalloc_node(size, node);
891 if (!rx_ring->rx_buffer_info) {
892 rx_ring->rx_buffer_info = vzalloc(size);
893 if (!rx_ring->rx_buffer_info)
897 size = sizeof(u16) * rx_ring->ring_size;
898 rx_ring->free_ids = vzalloc_node(size, node);
899 if (!rx_ring->free_ids) {
900 rx_ring->free_ids = vzalloc(size);
901 if (!rx_ring->free_ids) {
902 vfree(rx_ring->rx_buffer_info);
903 rx_ring->rx_buffer_info = NULL;
908 /* Req id ring for receiving RX pkts out of order */
909 for (i = 0; i < rx_ring->ring_size; i++)
910 rx_ring->free_ids[i] = i;
912 /* Reset rx statistics */
913 memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
915 rx_ring->next_to_clean = 0;
916 rx_ring->next_to_use = 0;
917 rx_ring->cpu = ena_irq->cpu;
922 /* ena_free_rx_resources - Free I/O Rx Resources
923 * @adapter: network interface device structure
926 * Free all receive software resources
928 static void ena_free_rx_resources(struct ena_adapter *adapter,
931 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
933 vfree(rx_ring->rx_buffer_info);
934 rx_ring->rx_buffer_info = NULL;
936 vfree(rx_ring->free_ids);
937 rx_ring->free_ids = NULL;
940 /* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
941 * @adapter: board private structure
943 * Return 0 on success, negative on failure
945 static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
949 for (i = 0; i < adapter->num_io_queues; i++) {
950 rc = ena_setup_rx_resources(adapter, i);
959 netif_err(adapter, ifup, adapter->netdev,
960 "Rx queue %d: allocation failed\n", i);
962 /* rewind the index freeing the rings as we go */
964 ena_free_rx_resources(adapter, i);
968 /* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
969 * @adapter: board private structure
971 * Free all receive software resources
973 static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
977 for (i = 0; i < adapter->num_io_queues; i++)
978 ena_free_rx_resources(adapter, i);
981 static int ena_alloc_rx_page(struct ena_ring *rx_ring,
982 struct ena_rx_buffer *rx_info, gfp_t gfp)
984 int headroom = rx_ring->rx_headroom;
985 struct ena_com_buf *ena_buf;
989 /* restore page offset value in case it has been changed by device */
990 rx_info->page_offset = headroom;
992 /* if the previously allocated page is not yet used */
993 if (unlikely(rx_info->page))
996 page = alloc_page(gfp);
997 if (unlikely(!page)) {
998 ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
1003 /* To enable NIC-side port-mirroring, AKA SPAN port,
1004 * we make the buffer readable by the NIC as well
1006 dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
1008 if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
1009 ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
1015 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1016 "Allocate page %p, rx_info %p\n", page, rx_info);
1018 rx_info->page = page;
1019 ena_buf = &rx_info->ena_buf;
1020 ena_buf->paddr = dma + headroom;
1021 ena_buf->len = ENA_PAGE_SIZE - headroom;
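/* Resulting page layout (sketch): the first rx_headroom bytes are reserved
 * (XDP_PACKET_HEADROOM when an XDP program is attached, 0 otherwise) and the
 * device writes the frame right after them, which is why both paddr and len
 * are adjusted by `headroom` above:
 *
 *	| headroom | frame data ...                        |
 *	^ dma       ^ ena_buf->paddr      len = ENA_PAGE_SIZE - headroom
 */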
1026 static void ena_unmap_rx_buff(struct ena_ring *rx_ring,
1027 struct ena_rx_buffer *rx_info)
1029 struct ena_com_buf *ena_buf = &rx_info->ena_buf;
1031 dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
1036 static void ena_free_rx_page(struct ena_ring *rx_ring,
1037 struct ena_rx_buffer *rx_info)
1039 struct page *page = rx_info->page;
1041 if (unlikely(!page)) {
1042 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
1043 "Trying to free unallocated buffer\n");
1047 ena_unmap_rx_buff(rx_ring, rx_info);
1050 rx_info->page = NULL;
1053 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
1055 u16 next_to_use, req_id;
1059 next_to_use = rx_ring->next_to_use;
1061 for (i = 0; i < num; i++) {
1062 struct ena_rx_buffer *rx_info;
1064 req_id = rx_ring->free_ids[next_to_use];
1066 rx_info = &rx_ring->rx_buffer_info[req_id];
1068 rc = ena_alloc_rx_page(rx_ring, rx_info,
1069 GFP_ATOMIC | __GFP_COMP);
1070 if (unlikely(rc < 0)) {
1071 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
1072 "Failed to allocate buffer for rx queue %d\n",
1076 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
1080 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
1081 "Failed to add buffer for rx queue %d\n",
1085 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
1086 rx_ring->ring_size);
1089 if (unlikely(i < num)) {
1090 ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1,
1092 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
1093 "Refilled rx qid %d with only %d buffers (from %d)\n",
1094 rx_ring->qid, i, num);
1097 /* ena_com_write_sq_doorbell issues a wmb() */
1099 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
1101 rx_ring->next_to_use = next_to_use;
1106 static void ena_free_rx_bufs(struct ena_adapter *adapter,
1109 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
1112 for (i = 0; i < rx_ring->ring_size; i++) {
1113 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
1116 ena_free_rx_page(rx_ring, rx_info);
1120 /* ena_refill_all_rx_bufs - allocate all queues Rx buffers
1121 * @adapter: board private structure
1123 static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
1125 struct ena_ring *rx_ring;
1126 int i, rc, bufs_num;
1128 for (i = 0; i < adapter->num_io_queues; i++) {
1129 rx_ring = &adapter->rx_ring[i];
1130 bufs_num = rx_ring->ring_size - 1;
1131 rc = ena_refill_rx_bufs(rx_ring, bufs_num);
1133 if (unlikely(rc != bufs_num))
1134 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
1135 "Refilling Queue %d failed. allocated %d buffers from: %d\n",
1140 static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
1144 for (i = 0; i < adapter->num_io_queues; i++)
1145 ena_free_rx_bufs(adapter, i);
1148 static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
1149 struct ena_tx_buffer *tx_info)
1151 struct ena_com_buf *ena_buf;
1155 ena_buf = tx_info->bufs;
1156 cnt = tx_info->num_of_bufs;
1161 if (tx_info->map_linear_data) {
1162 dma_unmap_single(tx_ring->dev,
1163 dma_unmap_addr(ena_buf, paddr),
1164 dma_unmap_len(ena_buf, len),
1170 /* unmap remaining mapped pages */
1171 for (i = 0; i < cnt; i++) {
1172 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
1173 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
1178 /* ena_free_tx_bufs - Free Tx Buffers per Queue
1179 * @tx_ring: TX ring whose buffers are to be freed
1181 static void ena_free_tx_bufs(struct ena_ring *tx_ring)
1183 bool print_once = true;
1186 for (i = 0; i < tx_ring->ring_size; i++) {
1187 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
1193 netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev,
1194 "Free uncompleted tx skb qid %d idx 0x%x\n",
1198 netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev,
1199 "Free uncompleted tx skb qid %d idx 0x%x\n",
1203 ena_unmap_tx_buff(tx_ring, tx_info);
1205 dev_kfree_skb_any(tx_info->skb);
1207 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
1211 static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
1213 struct ena_ring *tx_ring;
1216 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
1217 tx_ring = &adapter->tx_ring[i];
1218 ena_free_tx_bufs(tx_ring);
1222 static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
1227 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
1228 ena_qid = ENA_IO_TXQ_IDX(i);
1229 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1233 static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
1238 for (i = 0; i < adapter->num_io_queues; i++) {
1239 ena_qid = ENA_IO_RXQ_IDX(i);
1240 cancel_work_sync(&adapter->ena_napi[i].dim.work);
1241 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1245 static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
1247 ena_destroy_all_tx_queues(adapter);
1248 ena_destroy_all_rx_queues(adapter);
1251 static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
1252 struct ena_tx_buffer *tx_info, bool is_xdp)
1255 netif_err(ring->adapter,
1258 "tx_info doesn't have valid %s",
1259 is_xdp ? "xdp frame" : "skb");
1261 netif_err(ring->adapter,
1264 "Invalid req_id: %hu\n",
1267 ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
1269 /* Trigger device reset */
1270 ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
1271 set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
1275 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
1277 struct ena_tx_buffer *tx_info = NULL;
1279 if (likely(req_id < tx_ring->ring_size)) {
1280 tx_info = &tx_ring->tx_buffer_info[req_id];
1281 if (likely(tx_info->skb))
1285 return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
1288 static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
1290 struct ena_tx_buffer *tx_info = NULL;
1292 if (likely(req_id < xdp_ring->ring_size)) {
1293 tx_info = &xdp_ring->tx_buffer_info[req_id];
1294 if (likely(tx_info->xdpf))
1298 return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
1301 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
1303 struct netdev_queue *txq;
1312 next_to_clean = tx_ring->next_to_clean;
1313 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);
1315 while (tx_pkts < budget) {
1316 struct ena_tx_buffer *tx_info;
1317 struct sk_buff *skb;
1319 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
1324 rc = validate_tx_req_id(tx_ring, req_id);
1328 tx_info = &tx_ring->tx_buffer_info[req_id];
1331 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
1332 prefetch(&skb->end);
1334 tx_info->skb = NULL;
1335 tx_info->last_jiffies = 0;
1337 ena_unmap_tx_buff(tx_ring, tx_info);
1339 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
1340 "tx_poll: q %d skb %p completed\n", tx_ring->qid,
1343 tx_bytes += skb->len;
1346 total_done += tx_info->tx_descs;
1348 tx_ring->free_ids[next_to_clean] = req_id;
1349 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
1350 tx_ring->ring_size);
1353 tx_ring->next_to_clean = next_to_clean;
1354 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
1355 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
1357 netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
1359 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
1360 "tx_poll: q %d done. total pkts: %d\n",
1361 tx_ring->qid, tx_pkts);
1363 /* need to make the ring's circular update visible to
1364 * ena_start_xmit() before checking for netif_queue_stopped().
1368 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
1369 ENA_TX_WAKEUP_THRESH);
1370 if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
1371 __netif_tx_lock(txq, smp_processor_id());
1373 ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
1374 ENA_TX_WAKEUP_THRESH);
1375 if (netif_tx_queue_stopped(txq) && above_thresh &&
1376 test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
1377 netif_tx_wake_queue(txq);
1378 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
1381 __netif_tx_unlock(txq);
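/* The block above is the usual stop/wake race pattern (sketch): the queue may
 * have been stopped by ena_start_xmit() after the free-space count was
 * sampled, so the check is repeated under the tx queue lock before waking,
 * relying on the barrier described above to publish next_to_clean first:
 *
 *	if (stopped && have_space) {
 *		__netif_tx_lock(txq, cpu);
 *		if (stopped && have_space && dev_is_up)
 *			netif_tx_wake_queue(txq);
 *		__netif_tx_unlock(txq);
 *	}
 */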
1387 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
1389 struct sk_buff *skb;
1392 skb = napi_get_frags(rx_ring->napi);
1394 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1395 rx_ring->rx_copybreak);
1397 if (unlikely(!skb)) {
1398 ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
1400 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
1401 "Failed to allocate skb. frags: %d\n", frags);
1408 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
1409 struct ena_com_rx_buf_info *ena_bufs,
1413 struct sk_buff *skb;
1414 struct ena_rx_buffer *rx_info;
1415 u16 len, req_id, buf = 0;
1418 len = ena_bufs[buf].len;
1419 req_id = ena_bufs[buf].req_id;
1421 rx_info = &rx_ring->rx_buffer_info[req_id];
1423 if (unlikely(!rx_info->page)) {
1424 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
1429 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1430 "rx_info %p page %p\n",
1431 rx_info, rx_info->page);
1433 /* save virt address of first buffer */
1434 va = page_address(rx_info->page) + rx_info->page_offset;
1438 if (len <= rx_ring->rx_copybreak) {
1439 skb = ena_alloc_skb(rx_ring, false);
1443 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1444 "RX allocated small packet. len %d. data_len %d\n",
1445 skb->len, skb->data_len);
1447 /* sync this buffer for CPU use */
1448 dma_sync_single_for_cpu(rx_ring->dev,
1449 dma_unmap_addr(&rx_info->ena_buf, paddr),
1452 skb_copy_to_linear_data(skb, va, len);
1453 dma_sync_single_for_device(rx_ring->dev,
1454 dma_unmap_addr(&rx_info->ena_buf, paddr),
1459 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1460 rx_ring->free_ids[*next_to_clean] = req_id;
1461 *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
1462 rx_ring->ring_size);
1466 skb = ena_alloc_skb(rx_ring, true);
1471 ena_unmap_rx_buff(rx_ring, rx_info);
1473 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
1474 rx_info->page_offset, len, ENA_PAGE_SIZE);
1476 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1477 "RX skb updated. len %d. data_len %d\n",
1478 skb->len, skb->data_len);
1480 rx_info->page = NULL;
1482 rx_ring->free_ids[*next_to_clean] = req_id;
1484 ENA_RX_RING_IDX_NEXT(*next_to_clean,
1485 rx_ring->ring_size);
1486 if (likely(--descs == 0))
1490 len = ena_bufs[buf].len;
1491 req_id = ena_bufs[buf].req_id;
1493 rx_info = &rx_ring->rx_buffer_info[req_id];
1499 /* ena_rx_checksum - indicate in skb if hw indicated a good cksum
1500 * @adapter: structure containing adapter specific data
1501 * @ena_rx_ctx: received packet context/metadata
1502 * @skb: skb currently being received and modified
1504 static void ena_rx_checksum(struct ena_ring *rx_ring,
1505 struct ena_com_rx_ctx *ena_rx_ctx,
1506 struct sk_buff *skb)
1508 /* Rx csum disabled */
1509 if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
1510 skb->ip_summed = CHECKSUM_NONE;
1514 /* For fragmented packets the checksum isn't valid */
1515 if (ena_rx_ctx->frag) {
1516 skb->ip_summed = CHECKSUM_NONE;
1520 /* if IPv4 and checksum error */
1521 if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
1522 (ena_rx_ctx->l3_csum_err))) {
1523 /* ipv4 checksum error */
1524 skb->ip_summed = CHECKSUM_NONE;
1525 ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
1527 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
1528 "RX IPv4 header checksum error\n");
1533 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1534 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
1535 if (unlikely(ena_rx_ctx->l4_csum_err)) {
1536 /* TCP/UDP checksum error */
1537 ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
1539 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
1540 "RX L4 checksum error\n");
1541 skb->ip_summed = CHECKSUM_NONE;
1545 if (likely(ena_rx_ctx->l4_csum_checked)) {
1546 skb->ip_summed = CHECKSUM_UNNECESSARY;
1547 ena_increase_stat(&rx_ring->rx_stats.csum_good, 1,
1550 ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1,
1552 skb->ip_summed = CHECKSUM_NONE;
1555 skb->ip_summed = CHECKSUM_NONE;
1561 static void ena_set_rx_hash(struct ena_ring *rx_ring,
1562 struct ena_com_rx_ctx *ena_rx_ctx,
1563 struct sk_buff *skb)
1565 enum pkt_hash_types hash_type;
1567 if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
1568 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1569 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
1571 hash_type = PKT_HASH_TYPE_L4;
1573 hash_type = PKT_HASH_TYPE_NONE;
1575 /* Override hash type if the packet is fragmented */
1576 if (ena_rx_ctx->frag)
1577 hash_type = PKT_HASH_TYPE_NONE;
1579 skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
1583 static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
1585 struct ena_rx_buffer *rx_info;
1588 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
1589 xdp_prepare_buff(xdp, page_address(rx_info->page),
1590 rx_info->page_offset,
1591 rx_ring->ena_bufs[0].len, false);
1592 /* If for some reason we received a bigger packet than
1593 * we expect, then we simply drop it
1595 if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
1598 ret = ena_xdp_execute(rx_ring, xdp);
1600 /* The xdp program might expand the headers */
1601 if (ret == XDP_PASS) {
1602 rx_info->page_offset = xdp->data - xdp->data_hard_start;
1603 rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
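/* The recalculation above matters because an XDP program may move xdp->data,
 * e.g. to strip or prepend a header within the reserved headroom. A hedged
 * program-side sketch using the standard helper:
 *
 *	// grow the packet 4 bytes into the headroom
 *	if (bpf_xdp_adjust_head(ctx, -4))
 *		return XDP_ABORTED;
 */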
1608 /* ena_clean_rx_irq - Cleanup RX irq
1609 * @rx_ring: RX ring to clean
1610 * @napi: napi handler
1611 * @budget: how many packets the driver is allowed to clean
1613 * Returns the number of cleaned buffers.
1615 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
1618 u16 next_to_clean = rx_ring->next_to_clean;
1619 struct ena_com_rx_ctx ena_rx_ctx;
1620 struct ena_rx_buffer *rx_info;
1621 struct ena_adapter *adapter;
1622 u32 res_budget, work_done;
1623 int rx_copybreak_pkt = 0;
1624 int refill_threshold;
1625 struct sk_buff *skb;
1626 int refill_required;
1627 struct xdp_buff xdp;
1634 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1635 "%s qid %d\n", __func__, rx_ring->qid);
1636 res_budget = budget;
1637 xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
1640 xdp_verdict = XDP_PASS;
1642 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
1643 ena_rx_ctx.max_bufs = rx_ring->sgl_size;
1644 ena_rx_ctx.descs = 0;
1645 ena_rx_ctx.pkt_offset = 0;
1646 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
1647 rx_ring->ena_com_io_sq,
1652 if (unlikely(ena_rx_ctx.descs == 0))
1655 /* First descriptor might have an offset set by the device */
1656 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
1657 rx_info->page_offset += ena_rx_ctx.pkt_offset;
1659 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1660 "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
1661 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
1662 ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
1664 if (ena_xdp_present_ring(rx_ring))
1665 xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
1667 /* allocate skb and fill it */
1668 if (xdp_verdict == XDP_PASS)
1669 skb = ena_rx_skb(rx_ring,
1674 if (unlikely(!skb)) {
1675 for (i = 0; i < ena_rx_ctx.descs; i++) {
1676 int req_id = rx_ring->ena_bufs[i].req_id;
1678 rx_ring->free_ids[next_to_clean] = req_id;
1680 ENA_RX_RING_IDX_NEXT(next_to_clean,
1681 rx_ring->ring_size);
1683 /* Packet was passed for transmission, unmap it
1686 if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) {
1687 ena_unmap_rx_buff(rx_ring,
1688 &rx_ring->rx_buffer_info[req_id]);
1689 rx_ring->rx_buffer_info[req_id].page = NULL;
1692 if (xdp_verdict != XDP_PASS) {
1693 xdp_flags |= xdp_verdict;
1700 ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);
1702 ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);
1704 skb_record_rx_queue(skb, rx_ring->qid);
1706 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
1707 total_len += rx_ring->ena_bufs[0].len;
1709 napi_gro_receive(napi, skb);
1711 total_len += skb->len;
1712 napi_gro_frags(napi);
1716 } while (likely(res_budget));
1718 work_done = budget - res_budget;
1719 rx_ring->per_napi_packets += work_done;
1720 u64_stats_update_begin(&rx_ring->syncp);
1721 rx_ring->rx_stats.bytes += total_len;
1722 rx_ring->rx_stats.cnt += work_done;
1723 rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
1724 u64_stats_update_end(&rx_ring->syncp);
1726 rx_ring->next_to_clean = next_to_clean;
1728 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
1730 min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
1731 ENA_RX_REFILL_THRESH_PACKET);
1733 /* Optimization, try to batch new rx buffers */
1734 if (refill_required > refill_threshold) {
1735 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
1736 ena_refill_rx_bufs(rx_ring, refill_required);
1739 if (xdp_flags & XDP_REDIRECT)
1745 adapter = netdev_priv(rx_ring->netdev);
1747 if (rc == -ENOSPC) {
1748 ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
1750 adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
1752 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
1754 adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
1757 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
1762 static void ena_dim_work(struct work_struct *w)
1764 struct dim *dim = container_of(w, struct dim, work);
1765 struct dim_cq_moder cur_moder =
1766 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
1767 struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);
1769 ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
1770 dim->state = DIM_START_MEASURE;
1773 static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
1775 struct dim_sample dim_sample;
1776 struct ena_ring *rx_ring = ena_napi->rx_ring;
1778 if (!rx_ring->per_napi_packets)
1781 rx_ring->non_empty_napi_events++;
1783 dim_update_sample(rx_ring->non_empty_napi_events,
1784 rx_ring->rx_stats.cnt,
1785 rx_ring->rx_stats.bytes,
1788 net_dim(&ena_napi->dim, dim_sample);
1790 rx_ring->per_napi_packets = 0;
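/* Adaptive moderation loop (hedged summary): every non-empty napi round feeds
 * a dim_sample of event/packet/byte counters into net_dim(); when the
 * algorithm settles on a new profile it schedules ena_dim_work(), which copies
 * the chosen usec value into rx_ring->smoothed_interval, and
 * ena_unmask_interrupt() programs that interval into the device on the next
 * unmask.
 */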
1793 static void ena_unmask_interrupt(struct ena_ring *tx_ring,
1794 struct ena_ring *rx_ring)
1796 struct ena_eth_io_intr_reg intr_reg;
1797 u32 rx_interval = 0;
1798 /* Rx ring can be NULL for XDP tx queues, which don't have an
1799 * accompanying rx_ring pair.
1802 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
1803 rx_ring->smoothed_interval :
1804 ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);
1806 /* Update intr register: rx intr delay,
1807 * tx intr delay and interrupt unmask
1809 ena_com_update_intr_reg(&intr_reg,
1811 tx_ring->smoothed_interval,
1814 ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1,
1817 /* It is a shared MSI-X vector.
1818 * The Tx and Rx CQs both hold a pointer to it,
1819 * so we use one of them to reach the intr reg.
1820 * The Tx ring is used because rx_ring is NULL for XDP queues
1822 ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
1825 static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
1826 struct ena_ring *rx_ring)
1828 int cpu = get_cpu();
1831 /* Check only one ring since the 2 rings are running on the same cpu */
1832 if (likely(tx_ring->cpu == cpu))
1835 numa_node = cpu_to_node(cpu);
1838 if (numa_node != NUMA_NO_NODE) {
1839 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
1841 ena_com_update_numa_node(rx_ring->ena_com_io_cq,
1854 static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
1863 if (unlikely(!xdp_ring))
1865 next_to_clean = xdp_ring->next_to_clean;
1867 while (tx_pkts < budget) {
1868 struct ena_tx_buffer *tx_info;
1869 struct xdp_frame *xdpf;
1871 rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
1876 rc = validate_xdp_req_id(xdp_ring, req_id);
1880 tx_info = &xdp_ring->tx_buffer_info[req_id];
1881 xdpf = tx_info->xdpf;
1883 tx_info->xdpf = NULL;
1884 tx_info->last_jiffies = 0;
1885 ena_unmap_tx_buff(xdp_ring, tx_info);
1887 netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
1888 "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
1891 tx_bytes += xdpf->len;
1893 total_done += tx_info->tx_descs;
1895 xdp_return_frame(xdpf);
1896 xdp_ring->free_ids[next_to_clean] = req_id;
1897 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
1898 xdp_ring->ring_size);
1901 xdp_ring->next_to_clean = next_to_clean;
1902 ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
1903 ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);
1905 netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
1906 "tx_poll: q %d done. total pkts: %d\n",
1907 xdp_ring->qid, tx_pkts);
1912 static int ena_io_poll(struct napi_struct *napi, int budget)
1914 struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
1915 struct ena_ring *tx_ring, *rx_ring;
1917 int rx_work_done = 0;
1919 int napi_comp_call = 0;
1922 tx_ring = ena_napi->tx_ring;
1923 rx_ring = ena_napi->rx_ring;
1925 tx_ring->first_interrupt = ena_napi->first_interrupt;
1926 rx_ring->first_interrupt = ena_napi->first_interrupt;
1928 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
1930 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1931 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
1932 napi_complete_done(napi, 0);
1936 tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
1937 /* On netpoll the budget is zero and the handler should only clean the
1941 rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
1943 /* If the device is about to reset or is down, avoid unmasking
1944 * the interrupt and return 0 so NAPI won't reschedule
1946 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1947 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
1948 napi_complete_done(napi, 0);
1951 } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
1954 /* Update numa and unmask the interrupt only when scheduled
1955 * from the interrupt context (vs from sk_busy_loop)
1957 if (napi_complete_done(napi, rx_work_done) &&
1958 READ_ONCE(ena_napi->interrupts_masked)) {
1959 smp_rmb(); /* make sure interrupts_masked is read */
1960 WRITE_ONCE(ena_napi->interrupts_masked, false);
1961 /* We apply adaptive moderation on Rx path only.
1962 * Tx uses static interrupt moderation.
1964 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
1965 ena_adjust_adaptive_rx_intr_moderation(ena_napi);
1967 ena_unmask_interrupt(tx_ring, rx_ring);
1970 ena_update_ring_numa_node(tx_ring, rx_ring);
1977 u64_stats_update_begin(&tx_ring->syncp);
1978 tx_ring->tx_stats.napi_comp += napi_comp_call;
1979 tx_ring->tx_stats.tx_poll++;
1980 u64_stats_update_end(&tx_ring->syncp);
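/* NAPI contract followed above (sketch): returning less than the budget
 * together with a successful napi_complete_done() lets the interrupt be
 * re-armed, while returning the full budget (or failing to complete) keeps the
 * poller scheduled. Roughly:
 *
 *	if (work_done < budget && napi_complete_done(napi, work_done))
 *		ena_unmask_interrupt(tx_ring, rx_ring);	// back to irq mode
 *	return work_done;				// budget => keep polling
 */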
1985 static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
1987 struct ena_adapter *adapter = (struct ena_adapter *)data;
1989 ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1991 /* Don't call the aenq handler before probe is done */
1992 if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
1993 ena_com_aenq_intr_handler(adapter->ena_dev, data);
1998 /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
1999 * @irq: interrupt number
2000 * @data: pointer to a network interface private napi device structure
2002 static irqreturn_t ena_intr_msix_io(int irq, void *data)
2004 struct ena_napi *ena_napi = data;
2006 ena_napi->first_interrupt = true;
2008 WRITE_ONCE(ena_napi->interrupts_masked, true);
2009 smp_wmb(); /* write interrupts_masked before calling napi */
2011 napi_schedule_irqoff(&ena_napi->napi);
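/* interrupts_masked is set here, in hard-irq context, and consumed by
 * ena_io_poll(); the smp_wmb() above pairs with the smp_rmb() in the poll
 * routine so a completed napi only unmasks the interrupt when it really was
 * scheduled from this handler rather than from busy polling.
 */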
2016 /* Reserve a single MSI-X vector for management (admin + aenq),
2017 * plus one vector for each potential io queue.
2018 * The number of potential io queues is the minimum of what the device
2019 * supports and the number of vCPUs.
2021 static int ena_enable_msix(struct ena_adapter *adapter)
2023 int msix_vecs, irq_cnt;
2025 if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
2026 netif_err(adapter, probe, adapter->netdev,
2027 "Error, MSI-X is already enabled\n");
2031 /* Reserve the max msix vectors we might need */
2032 msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
2033 netif_dbg(adapter, probe, adapter->netdev,
2034 "Trying to enable MSI-X, vectors %d\n", msix_vecs);
2036 irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
2037 msix_vecs, PCI_IRQ_MSIX);
2040 netif_err(adapter, probe, adapter->netdev,
2041 "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
2045 if (irq_cnt != msix_vecs) {
2046 netif_notice(adapter, probe, adapter->netdev,
2047 "Enable only %d MSI-X (out of %d), reduce the number of queues\n",
2048 irq_cnt, msix_vecs);
2049 adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
2052 if (ena_init_rx_cpu_rmap(adapter))
2053 netif_warn(adapter, probe, adapter->netdev,
2054 "Failed to map IRQs to CPUs\n");
2056 adapter->msix_vecs = irq_cnt;
2057 set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
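/* pci_alloc_irq_vectors() semantics relied on above (hedged): the call may
 * grant anything between the requested minimum and maximum, returning the
 * count on success or a negative errno if even the minimum is unavailable,
 * and the driver shrinks num_io_queues when fewer vectors come back:
 *
 *	irq_cnt = pci_alloc_irq_vectors(pdev, min_vecs, max_vecs, PCI_IRQ_MSIX);
 *	if (irq_cnt < 0)
 *		return irq_cnt;		// not even min_vecs available
 *	// vectors 0..irq_cnt-1 are now usable via pci_irq_vector()
 */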
2062 static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
2066 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
2067 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
2068 pci_name(adapter->pdev));
2069 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
2070 ena_intr_msix_mgmnt;
2071 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
2072 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
2073 pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
2074 cpu = cpumask_first(cpu_online_mask);
2075 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
2076 cpumask_set_cpu(cpu,
2077 &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
2080 static void ena_setup_io_intr(struct ena_adapter *adapter)
2082 struct net_device *netdev;
2083 int irq_idx, i, cpu;
2086 netdev = adapter->netdev;
2087 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2089 for (i = 0; i < io_queue_count; i++) {
2090 irq_idx = ENA_IO_IRQ_IDX(i);
2091 cpu = i % num_online_cpus();
2093 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
2094 "%s-Tx-Rx-%d", netdev->name, i);
2095 adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
2096 adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
2097 adapter->irq_tbl[irq_idx].vector =
2098 pci_irq_vector(adapter->pdev, irq_idx);
2099 adapter->irq_tbl[irq_idx].cpu = cpu;
2101 cpumask_set_cpu(cpu,
2102 &adapter->irq_tbl[irq_idx].affinity_hint_mask);
2106 static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
2108 unsigned long flags = 0;
2109 struct ena_irq *irq;
2112 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
2113 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
2116 netif_err(adapter, probe, adapter->netdev,
2117 "Failed to request admin irq\n");
2121 netif_dbg(adapter, probe, adapter->netdev,
2122 "Set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
2123 irq->affinity_hint_mask.bits[0], irq->vector);
2125 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
2130 static int ena_request_io_irq(struct ena_adapter *adapter)
2132 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2133 unsigned long flags = 0;
2134 struct ena_irq *irq;
2137 if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
2138 netif_err(adapter, ifup, adapter->netdev,
2139 "Failed to request I/O IRQ: MSI-X is not enabled\n");
2143 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
2144 irq = &adapter->irq_tbl[i];
2145 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
2148 netif_err(adapter, ifup, adapter->netdev,
2149 "Failed to request I/O IRQ. index %d rc %d\n",
2154 netif_dbg(adapter, ifup, adapter->netdev,
2155 "Set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
2156 i, irq->affinity_hint_mask.bits[0], irq->vector);
2158 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
2164 for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
2165 irq = &adapter->irq_tbl[k];
2166 free_irq(irq->vector, irq->data);
2172 static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
2174 struct ena_irq *irq;
2176 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
2177 synchronize_irq(irq->vector);
2178 irq_set_affinity_hint(irq->vector, NULL);
2179 free_irq(irq->vector, irq->data);
2182 static void ena_free_io_irq(struct ena_adapter *adapter)
2184 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2185 struct ena_irq *irq;
2188 #ifdef CONFIG_RFS_ACCEL
2189 if (adapter->msix_vecs >= 1) {
2190 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
2191 adapter->netdev->rx_cpu_rmap = NULL;
2193 #endif /* CONFIG_RFS_ACCEL */
2195 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
2196 irq = &adapter->irq_tbl[i];
2197 irq_set_affinity_hint(irq->vector, NULL);
2198 free_irq(irq->vector, irq->data);
2202 static void ena_disable_msix(struct ena_adapter *adapter)
2204 if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
2205 pci_free_irq_vectors(adapter->pdev);
2208 static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
2210 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2213 if (!netif_running(adapter->netdev))
2216 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
2217 synchronize_irq(adapter->irq_tbl[i].vector);
2220 static void ena_del_napi_in_range(struct ena_adapter *adapter,
2226 for (i = first_index; i < first_index + count; i++) {
2227 netif_napi_del(&adapter->ena_napi[i].napi);
2229 WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
2230 adapter->ena_napi[i].xdp_ring);
2234 static void ena_init_napi_in_range(struct ena_adapter *adapter,
2235 int first_index, int count)
2239 for (i = first_index; i < first_index + count; i++) {
2240 struct ena_napi *napi = &adapter->ena_napi[i];
2242 netif_napi_add(adapter->netdev,
2244 ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
2247 if (!ENA_IS_XDP_INDEX(adapter, i)) {
2248 napi->rx_ring = &adapter->rx_ring[i];
2249 napi->tx_ring = &adapter->tx_ring[i];
2251 napi->xdp_ring = &adapter->tx_ring[i];
2257 static void ena_napi_disable_in_range(struct ena_adapter *adapter,
2263 for (i = first_index; i < first_index + count; i++)
2264 napi_disable(&adapter->ena_napi[i].napi);
2267 static void ena_napi_enable_in_range(struct ena_adapter *adapter,
2273 for (i = first_index; i < first_index + count; i++)
2274 napi_enable(&adapter->ena_napi[i].napi);
2277 /* Configure the Rx forwarding */
2278 static int ena_rss_configure(struct ena_adapter *adapter)
2280 struct ena_com_dev *ena_dev = adapter->ena_dev;
2283 /* In case the RSS table wasn't initialized by probe */
2284 if (!ena_dev->rss.tbl_log_size) {
2285 rc = ena_rss_init_default(adapter);
2286 if (rc && (rc != -EOPNOTSUPP)) {
2287 netif_err(adapter, ifup, adapter->netdev,
2288 "Failed to init RSS rc: %d\n", rc);
2293 /* Set indirect table */
2294 rc = ena_com_indirect_table_set(ena_dev);
2295 if (unlikely(rc && rc != -EOPNOTSUPP))
2298 /* Configure hash function (if supported) */
2299 rc = ena_com_set_hash_function(ena_dev);
2300 if (unlikely(rc && (rc != -EOPNOTSUPP)))
2303 /* Configure hash inputs (if supported) */
2304 rc = ena_com_set_hash_ctrl(ena_dev);
2305 if (unlikely(rc && (rc != -EOPNOTSUPP)))
2311 static int ena_up_complete(struct ena_adapter *adapter)
2315 rc = ena_rss_configure(adapter);
2319 ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
2321 ena_refill_all_rx_bufs(adapter);
2323 /* enable transmits */
2324 netif_tx_start_all_queues(adapter->netdev);
2326 ena_napi_enable_in_range(adapter,
2328 adapter->xdp_num_queues + adapter->num_io_queues);
2333 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
2335 struct ena_com_create_io_ctx ctx;
2336 struct ena_com_dev *ena_dev;
2337 struct ena_ring *tx_ring;
2342 ena_dev = adapter->ena_dev;
2344 tx_ring = &adapter->tx_ring[qid];
2345 msix_vector = ENA_IO_IRQ_IDX(qid);
2346 ena_qid = ENA_IO_TXQ_IDX(qid);
2348 memset(&ctx, 0x0, sizeof(ctx));
2350 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
2352 ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
2353 ctx.msix_vector = msix_vector;
2354 ctx.queue_size = tx_ring->ring_size;
2355 ctx.numa_node = cpu_to_node(tx_ring->cpu);
2357 rc = ena_com_create_io_queue(ena_dev, &ctx);
2359 netif_err(adapter, ifup, adapter->netdev,
2360 "Failed to create I/O TX queue num %d rc: %d\n",
2365 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2366 &tx_ring->ena_com_io_sq,
2367 &tx_ring->ena_com_io_cq);
2369 netif_err(adapter, ifup, adapter->netdev,
2370 "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
2372 ena_com_destroy_io_queue(ena_dev, ena_qid);
2376 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
2380 static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
2381 int first_index, int count)
2383 struct ena_com_dev *ena_dev = adapter->ena_dev;
2386 for (i = first_index; i < first_index + count; i++) {
2387 rc = ena_create_io_tx_queue(adapter, i);
2395 while (i-- > first_index)
2396 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
2401 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
2403 struct ena_com_dev *ena_dev;
2404 struct ena_com_create_io_ctx ctx;
2405 struct ena_ring *rx_ring;
2410 ena_dev = adapter->ena_dev;
2412 rx_ring = &adapter->rx_ring[qid];
2413 msix_vector = ENA_IO_IRQ_IDX(qid);
2414 ena_qid = ENA_IO_RXQ_IDX(qid);
2416 memset(&ctx, 0x0, sizeof(ctx));
2419 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
2420 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2421 ctx.msix_vector = msix_vector;
2422 ctx.queue_size = rx_ring->ring_size;
2423 ctx.numa_node = cpu_to_node(rx_ring->cpu);
2425 rc = ena_com_create_io_queue(ena_dev, &ctx);
2427 netif_err(adapter, ifup, adapter->netdev,
2428 "Failed to create I/O RX queue num %d rc: %d\n",
2433 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2434 &rx_ring->ena_com_io_sq,
2435 &rx_ring->ena_com_io_cq);
2437 netif_err(adapter, ifup, adapter->netdev,
2438 "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
2443 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
2447 ena_com_destroy_io_queue(ena_dev, ena_qid);
2451 static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
2453 struct ena_com_dev *ena_dev = adapter->ena_dev;
2456 for (i = 0; i < adapter->num_io_queues; i++) {
2457 rc = ena_create_io_rx_queue(adapter, i);
2460 INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
2467 cancel_work_sync(&adapter->ena_napi[i].dim.work);
2468 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
2474 static void set_io_rings_size(struct ena_adapter *adapter,
2480 for (i = 0; i < adapter->num_io_queues; i++) {
2481 adapter->tx_ring[i].ring_size = new_tx_size;
2482 adapter->rx_ring[i].ring_size = new_rx_size;
2486 /* This function allows queue allocation to back off when the system is
2487 * low on memory. If there is not enough memory to allocate io queues,
2488 * the driver will try to allocate smaller queues.
2490 * The backoff algorithm is as follows:
2491 * 1. Try to allocate TX and RX; if successful,
2492 * 1.1. return success.
2494 * 2. Divide by 2 the size of the larger of the RX and TX queues (or both if their sizes are equal).
2496 * 3. If TX or RX is now smaller than 256 (ENA_MIN_RING_SIZE),
2497 * 3.1. return failure.
2499 * 4. Otherwise, go back to 1.
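 *
 * For example (illustrative values): with requested sizes TX=1024 and RX=1024
 * and repeated -ENOMEM failures, the retry sequence is 1024/1024 -> 512/512 ->
 * 256/256; a further halving would drop below ENA_MIN_RING_SIZE, so the
 * allocation fails. With asymmetric sizes such as TX=1024, RX=256, only the
 * larger (TX) queue is halved on each retry: 1024/256 -> 512/256 -> 256/256.
 */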
2501 static int create_queues_with_size_backoff(struct ena_adapter *adapter)
2503 int rc, cur_rx_ring_size, cur_tx_ring_size;
2504 int new_rx_ring_size, new_tx_ring_size;
2506 /* current queue sizes might be set to smaller than the requested
2507 * ones due to past queue allocation failures.
2509 set_io_rings_size(adapter, adapter->requested_tx_ring_size,
2510 adapter->requested_rx_ring_size);
2513 if (ena_xdp_present(adapter)) {
2514 rc = ena_setup_and_create_all_xdp_queues(adapter);
2519 rc = ena_setup_tx_resources_in_range(adapter,
2521 adapter->num_io_queues);
2525 rc = ena_create_io_tx_queues_in_range(adapter,
2527 adapter->num_io_queues);
2529 goto err_create_tx_queues;
2531 rc = ena_setup_all_rx_resources(adapter);
2535 rc = ena_create_all_io_rx_queues(adapter);
2537 goto err_create_rx_queues;
2541 err_create_rx_queues:
2542 ena_free_all_io_rx_resources(adapter);
2544 ena_destroy_all_tx_queues(adapter);
2545 err_create_tx_queues:
2546 ena_free_all_io_tx_resources(adapter);
2548 if (rc != -ENOMEM) {
2549 netif_err(adapter, ifup, adapter->netdev,
2550 "Queue creation failed with error code %d\n",
2555 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2556 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2558 netif_err(adapter, ifup, adapter->netdev,
2559 "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2560 cur_tx_ring_size, cur_rx_ring_size);
2562 new_tx_ring_size = cur_tx_ring_size;
2563 new_rx_ring_size = cur_rx_ring_size;
2565 /* Decrease the size of the larger queue, or
2566 * decrease both if they are the same size.
2568 if (cur_rx_ring_size <= cur_tx_ring_size)
2569 new_tx_ring_size = cur_tx_ring_size / 2;
2570 if (cur_rx_ring_size >= cur_tx_ring_size)
2571 new_rx_ring_size = cur_rx_ring_size / 2;
2573 if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
2574 new_rx_ring_size < ENA_MIN_RING_SIZE) {
2575 netif_err(adapter, ifup, adapter->netdev,
2576 "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
2581 netif_err(adapter, ifup, adapter->netdev,
2582 "Retrying queue creation with sizes TX=%d, RX=%d\n",
2586 set_io_rings_size(adapter, new_tx_ring_size,
2591 static int ena_up(struct ena_adapter *adapter)
2593 int io_queue_count, rc, i;
2595 netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
2597 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2598 ena_setup_io_intr(adapter);
2600 /* napi poll functions should be initialized before running
2601 * request_irq() to handle a rare condition where there is a pending
2602 * interrupt: the ISR would fire immediately while the poll
2603 * function was not yet set, causing a NULL dereference.
2605 ena_init_napi_in_range(adapter, 0, io_queue_count);
2607 rc = ena_request_io_irq(adapter);
2611 rc = create_queues_with_size_backoff(adapter);
2613 goto err_create_queues_with_backoff;
2615 rc = ena_up_complete(adapter);
2619 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2620 netif_carrier_on(adapter->netdev);
2622 ena_increase_stat(&adapter->dev_stats.interface_up, 1,
2625 set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2627 /* Enable completion queue interrupts */
2628 for (i = 0; i < adapter->num_io_queues; i++)
2629 ena_unmask_interrupt(&adapter->tx_ring[i],
2630 &adapter->rx_ring[i]);
2632 /* Schedule napi in case we had pending packets
2633 * from the last time napi was disabled
2635 for (i = 0; i < io_queue_count; i++)
2636 napi_schedule(&adapter->ena_napi[i].napi);
2641 ena_destroy_all_tx_queues(adapter);
2642 ena_free_all_io_tx_resources(adapter);
2643 ena_destroy_all_rx_queues(adapter);
2644 ena_free_all_io_rx_resources(adapter);
2645 err_create_queues_with_backoff:
2646 ena_free_io_irq(adapter);
2648 ena_del_napi_in_range(adapter, 0, io_queue_count);
2653 static void ena_down(struct ena_adapter *adapter)
2655 int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2657 netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
2659 clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2661 ena_increase_stat(&adapter->dev_stats.interface_down, 1,
2664 netif_carrier_off(adapter->netdev);
2665 netif_tx_disable(adapter->netdev);
2667 /* After this point the napi handler won't enable the tx queue */
2668 ena_napi_disable_in_range(adapter, 0, io_queue_count);
2670 /* After the queues are destroyed there won't be any new interrupts */
2672 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
2675 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2677 netif_err(adapter, ifdown, adapter->netdev,
2678 "Device reset failed\n");
2679 /* stop submitting admin commands on a device that was reset */
2680 ena_com_set_admin_running_state(adapter->ena_dev, false);
2683 ena_destroy_all_io_queues(adapter);
2685 ena_disable_io_intr_sync(adapter);
2686 ena_free_io_irq(adapter);
2687 ena_del_napi_in_range(adapter, 0, io_queue_count);
2689 ena_free_all_tx_bufs(adapter);
2690 ena_free_all_rx_bufs(adapter);
2691 ena_free_all_io_tx_resources(adapter);
2692 ena_free_all_io_rx_resources(adapter);
2695 /* ena_open - Called when a network interface is made active
2696 * @netdev: network interface device structure
2698 * Returns 0 on success, negative value on failure
2700 * The open entry point is called when a network interface is made
2701 * active by the system (IFF_UP). At this point all resources needed
2702 * for transmit and receive operations are allocated, the interrupt
2703 * handler is registered with the OS, the watchdog timer is started,
2704 * and the stack is notified that the interface is ready.
2706 static int ena_open(struct net_device *netdev)
2708 struct ena_adapter *adapter = netdev_priv(netdev);
2711 /* Notify the stack of the actual queue counts. */
2712 rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
2714 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
2718 rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
2720 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
2724 rc = ena_up(adapter);
2731 /* ena_close - Disables a network interface
2732 * @netdev: network interface device structure
2734 * Returns 0, this is not allowed to fail
2736 * The close entry point is called when an interface is de-activated
2737 * by the OS. The hardware is still under the driver's control, but
2738 * needs to be disabled. A global MAC reset is issued to stop the
2739 * hardware, and all transmit and receive resources are freed.
2741 static int ena_close(struct net_device *netdev)
2743 struct ena_adapter *adapter = netdev_priv(netdev);
2745 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
2747 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2750 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2753 /* Check the device status and issue a reset if needed */
2754 check_for_admin_com_state(adapter);
2755 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2756 netif_err(adapter, ifdown, adapter->netdev,
2757 "Destroy failure, restarting device\n");
2758 ena_dump_stats_to_dmesg(adapter);
2759 /* rtnl lock already obtained in dev_ioctl() layer */
2760 ena_destroy_device(adapter, false);
2761 ena_restore_device(adapter);
2767 int ena_update_queue_sizes(struct ena_adapter *adapter,
2773 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2774 ena_close(adapter->netdev);
2775 adapter->requested_tx_ring_size = new_tx_size;
2776 adapter->requested_rx_ring_size = new_rx_size;
2777 ena_init_io_rings(adapter,
2779 adapter->xdp_num_queues +
2780 adapter->num_io_queues);
2781 return dev_was_up ? ena_up(adapter) : 0;
2784 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
2786 struct ena_com_dev *ena_dev = adapter->ena_dev;
2787 int prev_channel_count;
2790 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2791 ena_close(adapter->netdev);
2792 prev_channel_count = adapter->num_io_queues;
2793 adapter->num_io_queues = new_channel_count;
2794 if (ena_xdp_present(adapter) &&
2795 ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
2796 adapter->xdp_first_ring = new_channel_count;
2797 adapter->xdp_num_queues = new_channel_count;
2798 if (prev_channel_count > new_channel_count)
2799 ena_xdp_exchange_program_rx_in_range(adapter,
2802 prev_channel_count);
2804 ena_xdp_exchange_program_rx_in_range(adapter,
2805 adapter->xdp_bpf_prog,
2810 /* We need to destroy the rss table so that the indirection
2811 * table will be reinitialized by ena_up()
2813 ena_com_rss_destroy(ena_dev);
2814 ena_init_io_rings(adapter,
2816 adapter->xdp_num_queues +
2817 adapter->num_io_queues);
2818 return dev_was_up ? ena_open(adapter->netdev) : 0;
2821 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx,
2822 struct sk_buff *skb,
2823 bool disable_meta_caching)
2825 u32 mss = skb_shinfo(skb)->gso_size;
2826 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
2829 if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
2830 ena_tx_ctx->l4_csum_enable = 1;
2832 ena_tx_ctx->tso_enable = 1;
2833 ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
2834 ena_tx_ctx->l4_csum_partial = 0;
2836 ena_tx_ctx->tso_enable = 0;
2837 ena_meta->l4_hdr_len = 0;
2838 ena_tx_ctx->l4_csum_partial = 1;
2841 switch (ip_hdr(skb)->version) {
2843 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
2844 if (ip_hdr(skb)->frag_off & htons(IP_DF))
2847 ena_tx_ctx->l3_csum_enable = 1;
2848 l4_protocol = ip_hdr(skb)->protocol;
2851 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
2852 l4_protocol = ipv6_hdr(skb)->nexthdr;
2858 if (l4_protocol == IPPROTO_TCP)
2859 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
2861 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
2863 ena_meta->mss = mss;
2864 ena_meta->l3_hdr_len = skb_network_header_len(skb);
2865 ena_meta->l3_hdr_offset = skb_network_offset(skb);
2866 ena_tx_ctx->meta_valid = 1;
2867 } else if (disable_meta_caching) {
2868 memset(ena_meta, 0, sizeof(*ena_meta));
2869 ena_tx_ctx->meta_valid = 1;
2871 ena_tx_ctx->meta_valid = 0;
2875 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
2876 struct sk_buff *skb)
2878 int num_frags, header_len, rc;
2880 num_frags = skb_shinfo(skb)->nr_frags;
2881 header_len = skb_headlen(skb);
2883 if (num_frags < tx_ring->sgl_size)
2886 if ((num_frags == tx_ring->sgl_size) &&
2887 (header_len < tx_ring->tx_max_header_size))
2890 ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp);
2892 rc = skb_linearize(skb);
2894 ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1,
2901 static int ena_tx_map_skb(struct ena_ring *tx_ring,
2902 struct ena_tx_buffer *tx_info,
2903 struct sk_buff *skb,
2907 struct ena_adapter *adapter = tx_ring->adapter;
2908 struct ena_com_buf *ena_buf;
2910 u32 skb_head_len, frag_len, last_frag;
2915 skb_head_len = skb_headlen(skb);
2917 ena_buf = tx_info->bufs;
2919 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2920 /* When the device is in LLQ mode, the driver copies
2921 * the header into the device memory space.
2922 * The ena_com layer assumes the header is in linear memory.
2924 * This assumption might be wrong since part of the header
2925 * can be in the fragmented buffers.
2926 * Use skb_header_pointer() to make sure the header is in a
2927 * linear memory space.
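 *
 * For example (illustrative values): with tx_max_header_size = 96,
 * skb->len >= 96 and skb_headlen(skb) = 64, push_len is 96 but only 64 bytes
 * are linear, so skb_header_pointer() copies the header into
 * push_buf_intermediate_buf (counted by llq_buffer_copy) and the remaining
 * 32 bytes are skipped when mapping the first fragment.
 */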
2930 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
2931 *push_hdr = skb_header_pointer(skb, 0, push_len,
2932 tx_ring->push_buf_intermediate_buf);
2933 *header_len = push_len;
2934 if (unlikely(skb->data != *push_hdr)) {
2935 ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1,
2938 delta = push_len - skb_head_len;
2942 *header_len = min_t(u32, skb_head_len,
2943 tx_ring->tx_max_header_size);
2946 netif_dbg(adapter, tx_queued, adapter->netdev,
2947 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
2948 *push_hdr, push_len);
2950 if (skb_head_len > push_len) {
2951 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
2952 skb_head_len - push_len, DMA_TO_DEVICE);
2953 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2954 goto error_report_dma_error;
2956 ena_buf->paddr = dma;
2957 ena_buf->len = skb_head_len - push_len;
2960 tx_info->num_of_bufs++;
2961 tx_info->map_linear_data = 1;
2963 tx_info->map_linear_data = 0;
2966 last_frag = skb_shinfo(skb)->nr_frags;
2968 for (i = 0; i < last_frag; i++) {
2969 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2971 frag_len = skb_frag_size(frag);
2973 if (unlikely(delta >= frag_len)) {
2978 dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
2979 frag_len - delta, DMA_TO_DEVICE);
2980 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2981 goto error_report_dma_error;
2983 ena_buf->paddr = dma;
2984 ena_buf->len = frag_len - delta;
2986 tx_info->num_of_bufs++;
2992 error_report_dma_error:
2993 ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
2995 netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n");
2997 tx_info->skb = NULL;
2999 tx_info->num_of_bufs += i;
3000 ena_unmap_tx_buff(tx_ring, tx_info);
3005 /* Called with netif_tx_lock. */
3006 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
3008 struct ena_adapter *adapter = netdev_priv(dev);
3009 struct ena_tx_buffer *tx_info;
3010 struct ena_com_tx_ctx ena_tx_ctx;
3011 struct ena_ring *tx_ring;
3012 struct netdev_queue *txq;
3014 u16 next_to_use, req_id, header_len;
3017 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
3018 /* Determine which tx ring we will be placed on */
3019 qid = skb_get_queue_mapping(skb);
3020 tx_ring = &adapter->tx_ring[qid];
3021 txq = netdev_get_tx_queue(dev, qid);
3023 rc = ena_check_and_linearize_skb(tx_ring, skb);
3025 goto error_drop_packet;
3027 skb_tx_timestamp(skb);
3029 next_to_use = tx_ring->next_to_use;
3030 req_id = tx_ring->free_ids[next_to_use];
3031 tx_info = &tx_ring->tx_buffer_info[req_id];
3032 tx_info->num_of_bufs = 0;
3034 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
3036 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
3038 goto error_drop_packet;
3040 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
3041 ena_tx_ctx.ena_bufs = tx_info->bufs;
3042 ena_tx_ctx.push_header = push_hdr;
3043 ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
3044 ena_tx_ctx.req_id = req_id;
3045 ena_tx_ctx.header_len = header_len;
3047 /* set flags and meta data */
3048 ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
3050 rc = ena_xmit_common(dev,
3057 goto error_unmap_dma;
3059 netdev_tx_sent_queue(txq, skb->len);
3061 /* Stop the queue when no more space is available; the packet can need up
3062 * to sgl_size + 2 descriptors: one for the meta descriptor and one for the header
3063 * (if the header is larger than tx_max_header_size).
3065 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3066 tx_ring->sgl_size + 2))) {
3067 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
3070 netif_tx_stop_queue(txq);
3071 ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1,
3074 /* There is a rare condition where this function decides to
3075 * stop the queue but meanwhile clean_tx_irq updates
3076 * next_to_completion and terminates.
3077 * The queue would then remain stopped forever.
3078 * To solve this issue add an mb() to make sure that the
3079 * netif_tx_stop_queue() write is visible before checking if
3080 * there is additional space in the queue.
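 *
 * Roughly, the race being guarded against is: this path observes a full SQ
 * and stops the queue, while clean_tx_irq concurrently frees descriptors but
 * still sees the queue as running and therefore never wakes it. The mb()
 * orders the netif_tx_stop_queue() write before the space re-check below, so
 * this path can notice the newly freed space and wake the queue itself.
 */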
3084 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3085 ENA_TX_WAKEUP_THRESH)) {
3086 netif_tx_wake_queue(txq);
3087 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
3092 if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
3093 /* Trigger the DMA engine: ring the SQ doorbell via ena_com_write_sq_doorbell(). */
3096 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
3097 ena_increase_stat(&tx_ring->tx_stats.doorbells, 1,
3101 return NETDEV_TX_OK;
3104 ena_unmap_tx_buff(tx_ring, tx_info);
3105 tx_info->skb = NULL;
3109 return NETDEV_TX_OK;
3112 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
3113 struct net_device *sb_dev)
3116 /* We suspect that this is good for in-kernel network services that
3117 * want to loop incoming skb rx to tx in normal user-generated traffic;
3118 * most probably we will not get to this.
3120 if (skb_rx_queue_recorded(skb))
3121 qid = skb_get_rx_queue(skb);
3123 qid = netdev_pick_tx(dev, skb, NULL);
3128 static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3130 struct device *dev = &pdev->dev;
3131 struct ena_admin_host_info *host_info;
3134 /* Allocate only the host info */
3135 rc = ena_com_allocate_host_info(ena_dev);
3137 dev_err(dev, "Cannot allocate host info\n");
3141 host_info = ena_dev->host_attr.host_info;
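/* PCI bus/device/function of the adapter: bus number in the high byte,
 * devfn in the low byte.
 */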
3143 host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
3144 host_info->os_type = ENA_ADMIN_OS_LINUX;
3145 host_info->kernel_ver = LINUX_VERSION_CODE;
3146 strlcpy(host_info->kernel_ver_str, utsname()->version,
3147 sizeof(host_info->kernel_ver_str) - 1);
3148 host_info->os_dist = 0;
3149 strncpy(host_info->os_dist_str, utsname()->release,
3150 sizeof(host_info->os_dist_str) - 1);
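/* Pack the driver version (major/minor/sub-minor) and the module type
 * ('K', presumably marking the in-kernel driver) into one 32-bit field.
 */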
3151 host_info->driver_version =
3152 (DRV_MODULE_GEN_MAJOR) |
3153 (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
3154 (DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
3155 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
3156 host_info->num_cpus = num_online_cpus();
3158 host_info->driver_supported_features =
3159 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
3160 ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
3161 ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
3162 ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
3164 rc = ena_com_set_host_attributes(ena_dev);
3166 if (rc == -EOPNOTSUPP)
3167 dev_warn(dev, "Cannot set host attributes\n");
3169 dev_err(dev, "Cannot set host attributes\n");
3177 ena_com_delete_host_info(ena_dev);
3180 static void ena_config_debug_area(struct ena_adapter *adapter)
3182 u32 debug_area_size;
3185 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
3186 if (ss_count <= 0) {
3187 netif_err(adapter, drv, adapter->netdev,
3188 "SS count is not positive\n");
3192 /* allocate 32 bytes for each string and 64 bits for each value */
3193 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
3195 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
3197 netif_err(adapter, drv, adapter->netdev,
3198 "Cannot allocate debug area\n");
3202 rc = ena_com_set_host_attributes(adapter->ena_dev);
3204 if (rc == -EOPNOTSUPP)
3205 netif_warn(adapter, drv, adapter->netdev,
3206 "Cannot set host attributes\n");
3208 netif_err(adapter, drv, adapter->netdev,
3209 "Cannot set host attributes\n");
3215 ena_com_delete_debug_area(adapter->ena_dev);
3218 int ena_update_hw_stats(struct ena_adapter *adapter)
3222 rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
3224 dev_info_once(&adapter->pdev->dev, "Failed to get ENI stats\n");
3231 static void ena_get_stats64(struct net_device *netdev,
3232 struct rtnl_link_stats64 *stats)
3234 struct ena_adapter *adapter = netdev_priv(netdev);
3235 struct ena_ring *rx_ring, *tx_ring;
3241 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3244 for (i = 0; i < adapter->num_io_queues; i++) {
3247 tx_ring = &adapter->tx_ring[i];
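/* Read the per-ring packet/byte counters inside the u64_stats retry loop so
 * that 32-bit readers get a consistent 64-bit snapshot.
 */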
3250 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
3251 packets = tx_ring->tx_stats.cnt;
3252 bytes = tx_ring->tx_stats.bytes;
3253 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
3255 stats->tx_packets += packets;
3256 stats->tx_bytes += bytes;
3258 rx_ring = &adapter->rx_ring[i];
3261 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
3262 packets = rx_ring->rx_stats.cnt;
3263 bytes = rx_ring->rx_stats.bytes;
3264 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
3266 stats->rx_packets += packets;
3267 stats->rx_bytes += bytes;
3271 start = u64_stats_fetch_begin_irq(&adapter->syncp);
3272 rx_drops = adapter->dev_stats.rx_drops;
3273 tx_drops = adapter->dev_stats.tx_drops;
3274 } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
3276 stats->rx_dropped = rx_drops;
3277 stats->tx_dropped = tx_drops;
3279 stats->multicast = 0;
3280 stats->collisions = 0;
3282 stats->rx_length_errors = 0;
3283 stats->rx_crc_errors = 0;
3284 stats->rx_frame_errors = 0;
3285 stats->rx_fifo_errors = 0;
3286 stats->rx_missed_errors = 0;
3287 stats->tx_window_errors = 0;
3289 stats->rx_errors = 0;
3290 stats->tx_errors = 0;
3293 static const struct net_device_ops ena_netdev_ops = {
3294 .ndo_open = ena_open,
3295 .ndo_stop = ena_close,
3296 .ndo_start_xmit = ena_start_xmit,
3297 .ndo_select_queue = ena_select_queue,
3298 .ndo_get_stats64 = ena_get_stats64,
3299 .ndo_tx_timeout = ena_tx_timeout,
3300 .ndo_change_mtu = ena_change_mtu,
3301 .ndo_set_mac_address = NULL,
3302 .ndo_validate_addr = eth_validate_addr,
3304 .ndo_xdp_xmit = ena_xdp_xmit,
3307 static int ena_device_validate_params(struct ena_adapter *adapter,
3308 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3310 struct net_device *netdev = adapter->netdev;
3313 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
3316 netif_err(adapter, drv, netdev,
3317 "Error, mac addresses are different\n");
3321 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
3322 netif_err(adapter, drv, netdev,
3323 "Error, device max mtu is smaller than netdev MTU\n");
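/* Default LLQ configuration: inline header placement, multiple descriptors
 * per LLQ entry, two descriptors fetched ahead of the header, and 128-byte
 * ring entries.
 */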
3330 static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
3332 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
3333 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
3334 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
3335 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
3336 llq_config->llq_ring_entry_size_value = 128;
3339 static int ena_set_queues_placement_policy(struct pci_dev *pdev,
3340 struct ena_com_dev *ena_dev,
3341 struct ena_admin_feature_llq_desc *llq,
3342 struct ena_llq_configurations *llq_default_configurations)
3345 u32 llq_feature_mask;
3347 llq_feature_mask = 1 << ENA_ADMIN_LLQ;
3348 if (!(ena_dev->supported_features & llq_feature_mask)) {
3350 "LLQ is not supported. Fallback to host mode policy.\n");
3351 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3355 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
3358 "Failed to configure the device mode. Fallback to host mode policy.\n");
3359 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3365 static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
3368 bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));
3371 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
3373 "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
3374 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3380 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3381 pci_resource_start(pdev, ENA_MEM_BAR),
3382 pci_resource_len(pdev, ENA_MEM_BAR));
3384 if (!ena_dev->mem_bar)
3390 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
3391 struct ena_com_dev_get_features_ctx *get_feat_ctx,
3394 struct ena_llq_configurations llq_config;
3395 struct device *dev = &pdev->dev;
3396 bool readless_supported;
3401 rc = ena_com_mmio_reg_read_request_init(ena_dev);
3403 dev_err(dev, "Failed to init mmio read less\n");
3407 /* The PCIe configuration space revision id indicates whether mmio register read is supported */
3410 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
3411 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
3413 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
3415 dev_err(dev, "Can not reset device\n");
3416 goto err_mmio_read_less;
3419 rc = ena_com_validate_version(ena_dev);
3421 dev_err(dev, "Device version is too low\n");
3422 goto err_mmio_read_less;
3425 dma_width = ena_com_get_dma_width(ena_dev);
3426 if (dma_width < 0) {
3427 dev_err(dev, "Invalid dma width value %d", dma_width);
3429 goto err_mmio_read_less;
3432 rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
3434 dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
3435 goto err_mmio_read_less;
3438 /* ENA admin level init */
3439 rc = ena_com_admin_init(ena_dev, &aenq_handlers);
3442 "Can not initialize ena admin queue with device\n");
3443 goto err_mmio_read_less;
3446 /* To enable the msix interrupts the driver needs to know the number
3447 * of queues, so the driver uses polling mode to retrieve this information. */
3450 ena_com_set_admin_polling_mode(ena_dev, true);
3452 ena_config_host_info(ena_dev, pdev);
3454 /* Get Device Attributes */
3455 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
3457 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
3458 goto err_admin_init;
3461 /* Try to turn on all the available aenq groups */
3462 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
3463 BIT(ENA_ADMIN_FATAL_ERROR) |
3464 BIT(ENA_ADMIN_WARNING) |
3465 BIT(ENA_ADMIN_NOTIFICATION) |
3466 BIT(ENA_ADMIN_KEEP_ALIVE);
3468 aenq_groups &= get_feat_ctx->aenq.supported_groups;
3470 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
3472 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
3473 goto err_admin_init;
3476 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
3478 set_default_llq_configurations(&llq_config);
3480 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
3483 dev_err(dev, "ENA device init failed\n");
3484 goto err_admin_init;
3490 ena_com_delete_host_info(ena_dev);
3491 ena_com_admin_destroy(ena_dev);
3493 ena_com_mmio_reg_read_request_destroy(ena_dev);
3498 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
3500 struct ena_com_dev *ena_dev = adapter->ena_dev;
3501 struct device *dev = &adapter->pdev->dev;
3504 rc = ena_enable_msix(adapter);
3506 dev_err(dev, "Can not reserve msix vectors\n");
3510 ena_setup_mgmnt_intr(adapter);
3512 rc = ena_request_mgmnt_irq(adapter);
3514 dev_err(dev, "Can not setup management interrupts\n");
3515 goto err_disable_msix;
3518 ena_com_set_admin_polling_mode(ena_dev, false);
3520 ena_com_admin_aenq_enable(ena_dev);
3525 ena_disable_msix(adapter);
3530 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
3532 struct net_device *netdev = adapter->netdev;
3533 struct ena_com_dev *ena_dev = adapter->ena_dev;
3536 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3539 netif_carrier_off(netdev);
3541 del_timer_sync(&adapter->timer_service);
3543 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
3544 adapter->dev_up_before_reset = dev_up;
3546 ena_com_set_admin_running_state(ena_dev, false);
3548 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3551 /* Stop the device from sending AENQ events (if the reset flag is set
3552 * and the device is up, ena_down() has already reset the device).
3554 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
3555 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3557 ena_free_mgmnt_irq(adapter);
3559 ena_disable_msix(adapter);
3561 ena_com_abort_admin_commands(ena_dev);
3563 ena_com_wait_for_abort_completion(ena_dev);
3565 ena_com_admin_destroy(ena_dev);
3567 ena_com_mmio_reg_read_request_destroy(ena_dev);
3569 /* return reset reason to default value */
3570 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3572 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3573 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3576 static int ena_restore_device(struct ena_adapter *adapter)
3578 struct ena_com_dev_get_features_ctx get_feat_ctx;
3579 struct ena_com_dev *ena_dev = adapter->ena_dev;
3580 struct pci_dev *pdev = adapter->pdev;
3584 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3585 rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
3587 dev_err(&pdev->dev, "Can not initialize device\n");
3590 adapter->wd_state = wd_state;
3592 rc = ena_device_validate_params(adapter, &get_feat_ctx);
3594 dev_err(&pdev->dev, "Validation of device parameters failed\n");
3595 goto err_device_destroy;
3598 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3600 dev_err(&pdev->dev, "Enable MSI-X failed\n");
3601 goto err_device_destroy;
3603 /* If the interface was up before the reset, bring it up */
3604 if (adapter->dev_up_before_reset) {
3605 rc = ena_up(adapter);
3607 dev_err(&pdev->dev, "Failed to create I/O queues\n");
3608 goto err_disable_msix;
3612 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3614 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3615 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
3616 netif_carrier_on(adapter->netdev);
3618 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3619 adapter->last_keep_alive_jiffies = jiffies;
3621 dev_err(&pdev->dev, "Device reset completed successfully\n");
3625 ena_free_mgmnt_irq(adapter);
3626 ena_disable_msix(adapter);
3628 ena_com_abort_admin_commands(ena_dev);
3629 ena_com_wait_for_abort_completion(ena_dev);
3630 ena_com_admin_destroy(ena_dev);
3631 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
3632 ena_com_mmio_reg_read_request_destroy(ena_dev);
3634 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3635 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3637 "Reset attempt failed. Can not reset the device\n");
3642 static void ena_fw_reset_device(struct work_struct *work)
3644 struct ena_adapter *adapter =
3645 container_of(work, struct ena_adapter, reset_task);
3649 if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3650 ena_destroy_device(adapter, false);
3651 ena_restore_device(adapter);
3657 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3658 struct ena_ring *rx_ring)
3660 if (likely(rx_ring->first_interrupt))
3663 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3666 rx_ring->no_interrupt_event_cnt++;
3668 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3669 netif_err(adapter, rx_err, adapter->netdev,
3670 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
3672 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3673 smp_mb__before_atomic();
3674 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3681 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3682 struct ena_ring *tx_ring)
3684 struct ena_tx_buffer *tx_buf;
3685 unsigned long last_jiffies;
3689 for (i = 0; i < tx_ring->ring_size; i++) {
3690 tx_buf = &tx_ring->tx_buffer_info[i];
3691 last_jiffies = tx_buf->last_jiffies;
3693 if (last_jiffies == 0)
3694 /* no pending Tx at this location */
3697 if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
3698 2 * adapter->missing_tx_completion_to))) {
3699 /* If the interrupt is still not received after the grace
3700 * period, we schedule a reset.
3702 netif_err(adapter, tx_err, adapter->netdev,
3703 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
3705 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3706 smp_mb__before_atomic();
3707 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3711 if (unlikely(time_is_before_jiffies(last_jiffies +
3712 adapter->missing_tx_completion_to))) {
3713 if (!tx_buf->print_once)
3714 netif_notice(adapter, tx_err, adapter->netdev,
3715 "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
3718 tx_buf->print_once = 1;
3723 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
3724 netif_err(adapter, tx_err, adapter->netdev,
3725 "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
3727 adapter->missing_tx_completion_threshold);
3728 adapter->reset_reason =
3729 ENA_REGS_RESET_MISS_TX_CMPL;
3730 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3734 ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx,
3740 static void check_for_missing_completions(struct ena_adapter *adapter)
3742 struct ena_ring *tx_ring;
3743 struct ena_ring *rx_ring;
3747 io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
3748 /* Make sure the device isn't being brought up/down or reset by another context */
3751 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3754 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3757 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
3760 budget = ENA_MONITORED_TX_QUEUES;
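/* Check at most ENA_MONITORED_TX_QUEUES rings per timer tick, resuming
 * round-robin from where the previous scan stopped.
 */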
3762 for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
3763 tx_ring = &adapter->tx_ring[i];
3764 rx_ring = &adapter->rx_ring[i];
3766 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3770 rc = !ENA_IS_XDP_INDEX(adapter, i) ?
3771 check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
3780 adapter->last_monitored_tx_qid = i % io_queue_count;
3783 /* trigger napi schedule after 2 consecutive detections */
3784 #define EMPTY_RX_REFILL 2
3785 /* For the rare case where the device runs out of Rx descriptors and the
3786 * napi handler failed to refill new Rx descriptors (due to a lack of memory, for example).
3788 * This case will lead to a deadlock:
3789 * The device won't send interrupts since all the new Rx packets will be dropped,
3790 * and the napi handler won't allocate new Rx descriptors, so the device won't
3791 * be able to deliver new packets to the stack.
3793 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
3794 * It is recommended to have at least 512MB, with a minimum of 128MB for a
3795 * constrained environment.
3797 * When such a situation is detected - reschedule napi.
3799 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
3801 struct ena_ring *rx_ring;
3802 int i, refill_required;
3804 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3807 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3810 for (i = 0; i < adapter->num_io_queues; i++) {
3811 rx_ring = &adapter->rx_ring[i];
3813 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
3814 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3815 rx_ring->empty_rx_queue++;
3817 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3818 ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1,
3821 netif_err(adapter, drv, adapter->netdev,
3822 "Trigger refill for ring %d\n", i);
3824 napi_schedule(rx_ring->napi);
3825 rx_ring->empty_rx_queue = 0;
3828 rx_ring->empty_rx_queue = 0;
3833 /* Check for keep alive expiration */
3834 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
3836 unsigned long keep_alive_expired;
3838 if (!adapter->wd_state)
3841 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3844 keep_alive_expired = adapter->last_keep_alive_jiffies +
3845 adapter->keep_alive_timeout;
3846 if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
3847 netif_err(adapter, drv, adapter->netdev,
3848 "Keep alive watchdog timeout.\n");
3849 ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
3851 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
3852 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3856 static void check_for_admin_com_state(struct ena_adapter *adapter)
3858 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
3859 netif_err(adapter, drv, adapter->netdev,
3860 "ENA admin queue is not in running state!\n");
3861 ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
3863 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
3864 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3868 static void ena_update_hints(struct ena_adapter *adapter,
3869 struct ena_admin_ena_hw_hints *hints)
3871 struct net_device *netdev = adapter->netdev;
3873 if (hints->admin_completion_tx_timeout)
3874 adapter->ena_dev->admin_queue.completion_timeout =
3875 hints->admin_completion_tx_timeout * 1000;
3877 if (hints->mmio_read_timeout)
3878 /* convert to usec */
3879 adapter->ena_dev->mmio_read.reg_read_to =
3880 hints->mmio_read_timeout * 1000;
3882 if (hints->missed_tx_completion_count_threshold_to_reset)
3883 adapter->missing_tx_completion_threshold =
3884 hints->missed_tx_completion_count_threshold_to_reset;
3886 if (hints->missing_tx_completion_timeout) {
3887 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3888 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
3890 adapter->missing_tx_completion_to =
3891 msecs_to_jiffies(hints->missing_tx_completion_timeout);
3894 if (hints->netdev_wd_timeout)
3895 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
3897 if (hints->driver_watchdog_timeout) {
3898 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3899 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3901 adapter->keep_alive_timeout =
3902 msecs_to_jiffies(hints->driver_watchdog_timeout);
3906 static void ena_update_host_info(struct ena_admin_host_info *host_info,
3907 struct net_device *netdev)
3909 host_info->supported_network_features[0] =
3910 netdev->features & GENMASK_ULL(31, 0);
3911 host_info->supported_network_features[1] =
3912 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
3915 static void ena_timer_service(struct timer_list *t)
3917 struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
3918 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
3919 struct ena_admin_host_info *host_info =
3920 adapter->ena_dev->host_attr.host_info;
3922 check_for_missing_keep_alive(adapter);
3924 check_for_admin_com_state(adapter);
3926 check_for_missing_completions(adapter);
3928 check_for_empty_rx_ring(adapter);
3931 ena_dump_stats_to_buf(adapter, debug_area);
3934 ena_update_host_info(host_info, adapter->netdev);
3936 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3937 netif_err(adapter, drv, adapter->netdev,
3938 "Trigger reset is on\n");
3939 ena_dump_stats_to_dmesg(adapter);
3940 queue_work(ena_wq, &adapter->reset_task);
3944 /* Reset the timer */
3945 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3948 static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
3949 struct ena_com_dev *ena_dev,
3950 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3952 u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
3954 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
3955 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
3956 &get_feat_ctx->max_queue_ext.max_queue_ext;
3957 io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
3958 max_queue_ext->max_rx_cq_num);
3960 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
3961 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
3963 struct ena_admin_queue_feature_desc *max_queues =
3964 &get_feat_ctx->max_queues;
3965 io_tx_sq_num = max_queues->max_sq_num;
3966 io_tx_cq_num = max_queues->max_cq_num;
3967 io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
3970 /* In case of LLQ use the llq fields for the tx SQ/CQ */
3971 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3972 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
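/* The usable number of IO queues is bounded by the online CPUs, the driver
 * maximum, the Rx queue limit, the Tx SQ/CQ limits, and the MSI-X vectors
 * available after reserving one for management.
 */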
3974 max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
3975 max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
3976 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
3977 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
3978 /* 1 IRQ for mgmnt and 1 IRQ for each IO queue (TX and RX share a vector) */
3979 max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
3980 if (unlikely(!max_num_io_queues)) {
3981 dev_err(&pdev->dev, "The device doesn't have io queues\n");
3985 return max_num_io_queues;
3988 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
3989 struct net_device *netdev)
3991 netdev_features_t dev_features = 0;
3993 /* Set offload features */
3994 if (feat->offload.tx &
3995 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3996 dev_features |= NETIF_F_IP_CSUM;
3998 if (feat->offload.tx &
3999 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
4000 dev_features |= NETIF_F_IPV6_CSUM;
4002 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
4003 dev_features |= NETIF_F_TSO;
4005 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
4006 dev_features |= NETIF_F_TSO6;
4008 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
4009 dev_features |= NETIF_F_TSO_ECN;
4011 if (feat->offload.rx_supported &
4012 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
4013 dev_features |= NETIF_F_RXCSUM;
4015 if (feat->offload.rx_supported &
4016 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
4017 dev_features |= NETIF_F_RXCSUM;
4025 netdev->hw_features |= netdev->features;
4026 netdev->vlan_features |= netdev->features;
4029 static void ena_set_conf_feat_params(struct ena_adapter *adapter,
4030 struct ena_com_dev_get_features_ctx *feat)
4032 struct net_device *netdev = adapter->netdev;
4034 /* Copy mac address */
4035 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
4036 eth_hw_addr_random(netdev);
4037 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
4039 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
4040 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
4043 /* Set offload features */
4044 ena_set_dev_offloads(feat, netdev);
4046 adapter->max_mtu = feat->dev_attr.max_mtu;
4047 netdev->max_mtu = adapter->max_mtu;
4048 netdev->min_mtu = ENA_MIN_MTU;
4051 static int ena_rss_init_default(struct ena_adapter *adapter)
4053 struct ena_com_dev *ena_dev = adapter->ena_dev;
4054 struct device *dev = &adapter->pdev->dev;
4058 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
4060 dev_err(dev, "Cannot init indirect table\n");
4064 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
4065 val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
4066 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
4067 ENA_IO_RXQ_IDX(val));
4068 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4069 dev_err(dev, "Cannot fill indirect table\n");
4070 goto err_fill_indir;
4074 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
4075 ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
4076 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4077 dev_err(dev, "Cannot fill hash function\n");
4078 goto err_fill_indir;
4081 rc = ena_com_set_default_hash_ctrl(ena_dev);
4082 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4083 dev_err(dev, "Cannot fill hash control\n");
4084 goto err_fill_indir;
4090 ena_com_rss_destroy(ena_dev);
4096 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
4098 int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4100 pci_release_selected_regions(pdev, release_bars);
4104 static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
4106 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
4107 struct ena_com_dev *ena_dev = ctx->ena_dev;
4108 u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
4109 u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
4110 u32 max_tx_queue_size;
4111 u32 max_rx_queue_size;
4113 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
4114 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
4115 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
4116 max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
4117 max_queue_ext->max_rx_sq_depth);
4118 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
4120 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4121 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4122 llq->max_llq_depth);
4124 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4125 max_queue_ext->max_tx_sq_depth);
4127 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4128 max_queue_ext->max_per_packet_tx_descs);
4129 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4130 max_queue_ext->max_per_packet_rx_descs);
4132 struct ena_admin_queue_feature_desc *max_queues =
4133 &ctx->get_feat_ctx->max_queues;
4134 max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
4135 max_queues->max_sq_depth);
4136 max_tx_queue_size = max_queues->max_cq_depth;
4138 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4139 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4140 llq->max_llq_depth);
4142 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4143 max_queues->max_sq_depth);
4145 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4146 max_queues->max_packet_tx_descs);
4147 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4148 max_queues->max_packet_rx_descs);
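/* Ring sizes must be powers of two within [ENA_MIN_RING_SIZE, device max],
 * so round the device limits down and clamp the default sizes accordingly.
 */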
4151 max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
4152 max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
4154 tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
4156 rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
4159 tx_queue_size = rounddown_pow_of_two(tx_queue_size);
4160 rx_queue_size = rounddown_pow_of_two(rx_queue_size);
4162 ctx->max_tx_queue_size = max_tx_queue_size;
4163 ctx->max_rx_queue_size = max_rx_queue_size;
4164 ctx->tx_queue_size = tx_queue_size;
4165 ctx->rx_queue_size = rx_queue_size;
4170 /* ena_probe - Device Initialization Routine
4171 * @pdev: PCI device information struct
4172 * @ent: entry in ena_pci_tbl
4174 * Returns 0 on success, negative on failure
4176 * ena_probe initializes an adapter identified by a pci_dev structure.
4177 * The OS initialization, configuring of the adapter private structure,
4178 * and a hardware reset occur.
4180 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4182 struct ena_calc_queue_size_ctx calc_queue_ctx = {};
4183 struct ena_com_dev_get_features_ctx get_feat_ctx;
4184 struct ena_com_dev *ena_dev = NULL;
4185 struct ena_adapter *adapter;
4186 struct net_device *netdev;
4187 static int adapters_found;
4188 u32 max_num_io_queues;
4192 dev_dbg(&pdev->dev, "%s\n", __func__);
4194 rc = pci_enable_device_mem(pdev);
4196 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
4200 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
4202 dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
4203 goto err_disable_device;
4206 pci_set_master(pdev);
4208 ena_dev = vzalloc(sizeof(*ena_dev));
4211 goto err_disable_device;
4214 bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4215 rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
4217 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
4219 goto err_free_ena_dev;
4222 ena_dev->reg_bar = devm_ioremap(&pdev->dev,
4223 pci_resource_start(pdev, ENA_REG_BAR),
4224 pci_resource_len(pdev, ENA_REG_BAR));
4225 if (!ena_dev->reg_bar) {
4226 dev_err(&pdev->dev, "Failed to remap regs bar\n");
4228 goto err_free_region;
4231 ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;
4233 ena_dev->dmadev = &pdev->dev;
4235 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), ENA_MAX_RINGS);
4237 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
4239 goto err_free_region;
4242 SET_NETDEV_DEV(netdev, &pdev->dev);
4243 adapter = netdev_priv(netdev);
4244 adapter->ena_dev = ena_dev;
4245 adapter->netdev = netdev;
4246 adapter->pdev = pdev;
4247 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4249 ena_dev->net_device = netdev;
4251 pci_set_drvdata(pdev, adapter);
4253 rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
4255 dev_err(&pdev->dev, "ENA device init failed\n");
4258 goto err_netdev_destroy;
4261 rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
4263 dev_err(&pdev->dev, "ENA llq bar mapping failed\n");
4264 goto err_device_destroy;
4267 calc_queue_ctx.ena_dev = ena_dev;
4268 calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
4269 calc_queue_ctx.pdev = pdev;
4271 /* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
4272 * Updated during device initialization with the real granularity
4274 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
4275 ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
4276 ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
4277 max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
4278 rc = ena_calc_io_queue_size(&calc_queue_ctx);
4279 if (rc || !max_num_io_queues) {
4281 goto err_device_destroy;
4284 ena_set_conf_feat_params(adapter, &get_feat_ctx);
4286 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
4288 adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
4289 adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
4290 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
4291 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
4292 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
4293 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
4295 adapter->num_io_queues = max_num_io_queues;
4296 adapter->max_num_io_queues = max_num_io_queues;
4297 adapter->last_monitored_tx_qid = 0;
4299 adapter->xdp_first_ring = 0;
4300 adapter->xdp_num_queues = 0;
4302 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
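/* In LLQ (device placement) mode, honor the device's request to disable TX
 * meta descriptor caching, i.e. send the meta descriptor with every packet
 * (see ena_tx_csum()).
 */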
4303 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4304 adapter->disable_meta_caching =
4305 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
4306 BIT(ENA_ADMIN_DISABLE_META_CACHING));
4308 adapter->wd_state = wd_state;
4310 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
4312 rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
4315 "Failed to query interrupt moderation feature\n");
4316 goto err_device_destroy;
4318 ena_init_io_rings(adapter,
4320 adapter->xdp_num_queues +
4321 adapter->num_io_queues);
4323 netdev->netdev_ops = &ena_netdev_ops;
4324 netdev->watchdog_timeo = TX_TIMEOUT;
4325 ena_set_ethtool_ops(netdev);
4327 netdev->priv_flags |= IFF_UNICAST_FLT;
4329 u64_stats_init(&adapter->syncp);
4331 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
4334 "Failed to enable and set the admin interrupts\n");
4335 goto err_worker_destroy;
4337 rc = ena_rss_init_default(adapter);
4338 if (rc && (rc != -EOPNOTSUPP)) {
4339 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
4343 ena_config_debug_area(adapter);
4345 if (!ena_update_hw_stats(adapter))
4346 adapter->eni_stats_supported = true;
4348 adapter->eni_stats_supported = false;
4350 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
4352 netif_carrier_off(netdev);
4354 rc = register_netdev(netdev);
4356 dev_err(&pdev->dev, "Cannot register net device\n");
4360 INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
4362 adapter->last_keep_alive_jiffies = jiffies;
4363 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
4364 adapter->missing_tx_completion_to = TX_TIMEOUT;
4365 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
4367 ena_update_hints(adapter, &get_feat_ctx.hw_hints);
4369 timer_setup(&adapter->timer_service, ena_timer_service, 0);
4370 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
4372 dev_info(&pdev->dev,
4373 "%s found at mem %lx, mac addr %pM\n",
4374 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
4377 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
4384 ena_com_delete_debug_area(ena_dev);
4385 ena_com_rss_destroy(ena_dev);
4387 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
4388 /* stop submitting admin commands on a device that was reset */
4389 ena_com_set_admin_running_state(ena_dev, false);
4390 ena_free_mgmnt_irq(adapter);
4391 ena_disable_msix(adapter);
4393 del_timer(&adapter->timer_service);
4395 ena_com_delete_host_info(ena_dev);
4396 ena_com_admin_destroy(ena_dev);
4398 free_netdev(netdev);
4400 ena_release_bars(ena_dev, pdev);
4404 pci_disable_device(pdev);
4408 /*****************************************************************************/
4410 /* __ena_shutoff - Helper used in both PCI remove/shutdown routines
4411 * @pdev: PCI device information struct
4412 * @shutdown: Is it a shutdown operation? If false, means it is a removal
4414 * __ena_shutoff is a helper routine that does the real work on shutdown and
4415 * removal paths; the difference between those paths is whether we
4416 * detach or unregister the netdevice.
4418 static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
4420 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4421 struct ena_com_dev *ena_dev;
4422 struct net_device *netdev;
4424 ena_dev = adapter->ena_dev;
4425 netdev = adapter->netdev;
4427 #ifdef CONFIG_RFS_ACCEL
4428 if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
4429 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
4430 netdev->rx_cpu_rmap = NULL;
4432 #endif /* CONFIG_RFS_ACCEL */
4434 /* Make sure timer and reset routine won't be called after
4435 * freeing device resources.
4437 del_timer_sync(&adapter->timer_service);
4438 cancel_work_sync(&adapter->reset_task);
4440 rtnl_lock(); /* lock released inside the below if-else block */
4441 adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
4442 ena_destroy_device(adapter, true);
4444 netif_device_detach(netdev);
4449 unregister_netdev(netdev);
4450 free_netdev(netdev);
4453 ena_com_rss_destroy(ena_dev);
4455 ena_com_delete_debug_area(ena_dev);
4457 ena_com_delete_host_info(ena_dev);
4459 ena_release_bars(ena_dev, pdev);
4461 pci_disable_device(pdev);
4466 /* ena_remove - Device Removal Routine
4467 * @pdev: PCI device information struct
4469 * ena_remove is called by the PCI subsystem to alert the driver
4470 * that it should release a PCI device.
4473 static void ena_remove(struct pci_dev *pdev)
4475 __ena_shutoff(pdev, false);
4478 /* ena_shutdown - Device Shutdown Routine
4479 * @pdev: PCI device information struct
4481 * ena_shutdown is called by the PCI subsystem to alert the driver that
4482 * a shutdown/reboot (or kexec) is happening and the device must be disabled.
4485 static void ena_shutdown(struct pci_dev *pdev)
4487 __ena_shutoff(pdev, true);
/* ena_suspend - PM suspend callback
 * @dev_d: Device information struct
 */
static int __maybe_unused ena_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct ena_adapter *adapter = pci_get_drvdata(pdev);

	ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp);

	rtnl_lock();
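	/* A reset that was requested but not yet serviced is dropped here:
	 * suspend is about to tear the device down anyway, and resume will
	 * restore it, so running the reset work would be redundant.
	 */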
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		dev_err(&pdev->dev,
			"Ignoring device reset request as the device is being suspended\n");
		clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
	ena_destroy_device(adapter, true);
	rtnl_unlock();
	return 0;
}

/* ena_resume - PM resume callback
 * @dev_d: Device information struct
 */
static int __maybe_unused ena_resume(struct device *dev_d)
{
	struct ena_adapter *adapter = dev_get_drvdata(dev_d);
	int rc;

	ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp);

	rtnl_lock();
	rc = ena_restore_device(adapter);
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume);
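
/* PCI driver hooks: ena_probe()/ena_remove() handle device add and removal,
 * ena_shutdown() runs on reboot/kexec, ena_pm_ops wires up the suspend and
 * resume callbacks above, and SR-IOV VFs are toggled via the generic
 * pci_sriov_configure_simple() helper.
 */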
static struct pci_driver ena_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= ena_pci_tbl,
	.probe		= ena_probe,
	.remove		= ena_remove,
	.shutdown	= ena_shutdown,
	.driver.pm	= &ena_pm_ops,
	.sriov_configure = pci_sriov_configure_simple,
};

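/* ena_wq is a dedicated single-threaded workqueue used by the driver for
 * deferred work such as the per-adapter reset task; it must exist before
 * the PCI driver is registered.
 */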
static int __init ena_init(void)
{
	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
	if (!ena_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&ena_pci_driver);
}

static void __exit ena_cleanup(void)
{
	pci_unregister_driver(&ena_pci_driver);

	if (ena_wq) {
		destroy_workqueue(ena_wq);
		ena_wq = NULL;
	}
}

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_link_change_desc *aenq_desc =
		(struct ena_admin_aenq_link_change_desc *)aenq_e;
	int status = aenq_desc->flags &
		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
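
	/* A non-zero status means the link is up. Only toggle the carrier
	 * when no reset is in flight; the reset/restore path re-applies the
	 * carrier state once the device is brought back up.
	 */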
	if (status) {
		netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
			netif_carrier_on(adapter->netdev);
	} else {
		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_off(adapter->netdev);
	}
}

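/* Keep-alive AENQ event: refresh the watchdog timestamp checked by the
 * timer service and latch the device-wide drop counters, which arrive
 * split into 32-bit high/low words in the descriptor.
 */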
static void ena_keep_alive_wd(void *adapter_data,
			      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_keep_alive_desc *desc;
	u64 rx_drops;
	u64 tx_drops;

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	adapter->last_keep_alive_jiffies = jiffies;

	rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
	tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;

	u64_stats_update_begin(&adapter->syncp);
	/* These stats are accumulated by the device, so the counters indicate
	 * all drops since last reset.
	 */
	adapter->dev_stats.rx_drops = rx_drops;
	adapter->dev_stats.tx_drops = tx_drops;
	u64_stats_update_end(&adapter->syncp);
}

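/* Generic notification AENQ event: the only syndrome handled here is
 * ENA_ADMIN_UPDATE_HINTS, which carries updated hardware hints (timeouts
 * and intervals) inline in the event descriptor.
 */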
static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_ena_hw_hints *hints;

	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	     "Invalid group(%x) expected %x\n",
	     aenq_e->aenq_common_desc.group,
	     ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrome) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		netif_err(adapter, drv, adapter->netdev,
			  "Invalid aenq notification link state %d\n",
			  aenq_e->aenq_common_desc.syndrome);
	}
}

/* This handler will be called for unknown event group or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
				       struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	netif_err(adapter, drv, adapter->netdev,
		  "Unknown event was received or event with unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

module_init(ena_init);
module_exit(ena_cleanup);