drivers/net/ethernet/amazon/ena/ena_netdev.c
1 /*
2  * Copyright 2015 Amazon.com, Inc. or its affiliates.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35 #ifdef CONFIG_RFS_ACCEL
36 #include <linux/cpu_rmap.h>
37 #endif /* CONFIG_RFS_ACCEL */
38 #include <linux/ethtool.h>
39 #include <linux/if_vlan.h>
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/numa.h>
43 #include <linux/pci.h>
44 #include <linux/utsname.h>
45 #include <linux/version.h>
46 #include <linux/vmalloc.h>
47 #include <net/ip.h>
48
49 #include "ena_netdev.h"
50 #include "ena_pci_id_tbl.h"
51
52 static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";
53
54 MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
55 MODULE_DESCRIPTION(DEVICE_NAME);
56 MODULE_LICENSE("GPL");
57 MODULE_VERSION(DRV_MODULE_VERSION);
58
59 /* Time in jiffies before concluding the transmitter is hung. */
60 #define TX_TIMEOUT  (5 * HZ)
61
62 #define ENA_NAPI_BUDGET 64
63
64 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
65                 NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
66 static int debug = -1;
67 module_param(debug, int, 0);
68 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69
70 static struct ena_aenq_handlers aenq_handlers;
71
72 static struct workqueue_struct *ena_wq;
73
74 MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
75
76 static int ena_rss_init_default(struct ena_adapter *adapter);
77 static void check_for_admin_com_state(struct ena_adapter *adapter);
78 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
79 static int ena_restore_device(struct ena_adapter *adapter);
80
81 static void ena_tx_timeout(struct net_device *dev)
82 {
83         struct ena_adapter *adapter = netdev_priv(dev);
84
85         /* Change the state of the device to trigger reset.
86          * Check that we are not already in the middle of a reset.
87          */
88
89         if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
90                 return;
91
92         adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
93         u64_stats_update_begin(&adapter->syncp);
94         adapter->dev_stats.tx_timeout++;
95         u64_stats_update_end(&adapter->syncp);
96
97         netif_err(adapter, tx_err, dev, "Transmit time out\n");
98 }
99
100 static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
101 {
102         int i;
103
104         for (i = 0; i < adapter->num_queues; i++)
105                 adapter->rx_ring[i].mtu = mtu;
106 }
107
108 static int ena_change_mtu(struct net_device *dev, int new_mtu)
109 {
110         struct ena_adapter *adapter = netdev_priv(dev);
111         int ret;
112
113         ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
114         if (!ret) {
115                 netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
116                 update_rx_ring_mtu(adapter, new_mtu);
117                 dev->mtu = new_mtu;
118         } else {
119                 netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
120                           new_mtu);
121         }
122
123         return ret;
124 }
125
126 static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
127 {
128 #ifdef CONFIG_RFS_ACCEL
129         u32 i;
130         int rc;
131
132         adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
133         if (!adapter->netdev->rx_cpu_rmap)
134                 return -ENOMEM;
135         for (i = 0; i < adapter->num_queues; i++) {
136                 int irq_idx = ENA_IO_IRQ_IDX(i);
137
138                 rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
139                                       pci_irq_vector(adapter->pdev, irq_idx));
140                 if (rc) {
141                         free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
142                         adapter->netdev->rx_cpu_rmap = NULL;
143                         return rc;
144                 }
145         }
146 #endif /* CONFIG_RFS_ACCEL */
147         return 0;
148 }
149
150 static void ena_init_io_rings_common(struct ena_adapter *adapter,
151                                      struct ena_ring *ring, u16 qid)
152 {
153         ring->qid = qid;
154         ring->pdev = adapter->pdev;
155         ring->dev = &adapter->pdev->dev;
156         ring->netdev = adapter->netdev;
157         ring->napi = &adapter->ena_napi[qid].napi;
158         ring->adapter = adapter;
159         ring->ena_dev = adapter->ena_dev;
160         ring->per_napi_packets = 0;
161         ring->per_napi_bytes = 0;
162         ring->cpu = 0;
163         ring->first_interrupt = false;
164         ring->no_interrupt_event_cnt = 0;
165         u64_stats_init(&ring->syncp);
166 }
167
168 static void ena_init_io_rings(struct ena_adapter *adapter)
169 {
170         struct ena_com_dev *ena_dev;
171         struct ena_ring *txr, *rxr;
172         int i;
173
174         ena_dev = adapter->ena_dev;
175
176         for (i = 0; i < adapter->num_queues; i++) {
177                 txr = &adapter->tx_ring[i];
178                 rxr = &adapter->rx_ring[i];
179
180                 /* TX/RX common ring state */
181                 ena_init_io_rings_common(adapter, txr, i);
182                 ena_init_io_rings_common(adapter, rxr, i);
183
184                 /* TX specific ring state */
185                 txr->ring_size = adapter->tx_ring_size;
186                 txr->tx_max_header_size = ena_dev->tx_max_header_size;
187                 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
188                 txr->sgl_size = adapter->max_tx_sgl_size;
189                 txr->smoothed_interval =
190                         ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
191
192                 /* RX specific ring state */
193                 rxr->ring_size = adapter->rx_ring_size;
194                 rxr->rx_copybreak = adapter->rx_copybreak;
195                 rxr->sgl_size = adapter->max_rx_sgl_size;
196                 rxr->smoothed_interval =
197                         ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
198                 rxr->empty_rx_queue = 0;
199         }
200 }
201
202 /* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
203  * @adapter: network interface device structure
204  * @qid: queue index
205  *
206  * Return 0 on success, negative on failure
207  */
208 static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
209 {
210         struct ena_ring *tx_ring = &adapter->tx_ring[qid];
211         struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
212         int size, i, node;
213
214         if (tx_ring->tx_buffer_info) {
215                 netif_err(adapter, ifup,
216                           adapter->netdev, "tx_buffer_info is not NULL");
217                 return -EEXIST;
218         }
219
220         size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
221         node = cpu_to_node(ena_irq->cpu);
222
223         tx_ring->tx_buffer_info = vzalloc_node(size, node);
224         if (!tx_ring->tx_buffer_info) {
225                 tx_ring->tx_buffer_info = vzalloc(size);
226                 if (!tx_ring->tx_buffer_info)
227                         return -ENOMEM;
228         }
229
230         size = sizeof(u16) * tx_ring->ring_size;
231         tx_ring->free_tx_ids = vzalloc_node(size, node);
232         if (!tx_ring->free_tx_ids) {
233                 tx_ring->free_tx_ids = vzalloc(size);
234                 if (!tx_ring->free_tx_ids) {
235                         vfree(tx_ring->tx_buffer_info);
236                         return -ENOMEM;
237                 }
238         }
239
240         /* Req id ring for TX out of order completions */
241         for (i = 0; i < tx_ring->ring_size; i++)
242                 tx_ring->free_tx_ids[i] = i;
243
244         /* Reset tx statistics */
245         memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
246
247         tx_ring->next_to_use = 0;
248         tx_ring->next_to_clean = 0;
249         tx_ring->cpu = ena_irq->cpu;
250         return 0;
251 }
252
253 /* ena_free_tx_resources - Free I/O Tx Resources per Queue
254  * @adapter: network interface device structure
255  * @qid: queue index
256  *
257  * Free all transmit software resources
258  */
259 static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
260 {
261         struct ena_ring *tx_ring = &adapter->tx_ring[qid];
262
263         vfree(tx_ring->tx_buffer_info);
264         tx_ring->tx_buffer_info = NULL;
265
266         vfree(tx_ring->free_tx_ids);
267         tx_ring->free_tx_ids = NULL;
268 }
269
270 /* ena_setup_all_tx_resources - allocate I/O Tx resources for all queues
271  * @adapter: private structure
272  *
273  * Return 0 on success, negative on failure
274  */
275 static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
276 {
277         int i, rc = 0;
278
279         for (i = 0; i < adapter->num_queues; i++) {
280                 rc = ena_setup_tx_resources(adapter, i);
281                 if (rc)
282                         goto err_setup_tx;
283         }
284
285         return 0;
286
287 err_setup_tx:
288
289         netif_err(adapter, ifup, adapter->netdev,
290                   "Tx queue %d: allocation failed\n", i);
291
292         /* rewind the index freeing the rings as we go */
293         while (i--)
294                 ena_free_tx_resources(adapter, i);
295         return rc;
296 }
297
298 /* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
299  * @adapter: board private structure
300  *
301  * Free all transmit software resources
302  */
303 static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
304 {
305         int i;
306
307         for (i = 0; i < adapter->num_queues; i++)
308                 ena_free_tx_resources(adapter, i);
309 }
310
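/* validate_rx_req_id - sanity check an Rx request id returned by the device
 * @rx_ring: Rx ring the request id refers to
 * @req_id: request id to validate
 *
 * Return 0 when req_id is within the ring size; otherwise count the
 * bad_req_id error, schedule a device reset and return -EFAULT.
 */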
311 static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
312 {
313         if (likely(req_id < rx_ring->ring_size))
314                 return 0;
315
316         netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
317                   "Invalid rx req_id: %hu\n", req_id);
318
319         u64_stats_update_begin(&rx_ring->syncp);
320         rx_ring->rx_stats.bad_req_id++;
321         u64_stats_update_end(&rx_ring->syncp);
322
323         /* Trigger device reset */
324         rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
325         set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
326         return -EFAULT;
327 }
328
329 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
330  * @adapter: network interface device structure
331  * @qid: queue index
332  *
333  * Returns 0 on success, negative on failure
334  */
335 static int ena_setup_rx_resources(struct ena_adapter *adapter,
336                                   u32 qid)
337 {
338         struct ena_ring *rx_ring = &adapter->rx_ring[qid];
339         struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
340         int size, node, i;
341
342         if (rx_ring->rx_buffer_info) {
343                 netif_err(adapter, ifup, adapter->netdev,
344                           "rx_buffer_info is not NULL");
345                 return -EEXIST;
346         }
347
348         /* alloc an extra element so that in the rx path
349          * we can always prefetch rx_info + 1
350          */
351         size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
352         node = cpu_to_node(ena_irq->cpu);
353
354         rx_ring->rx_buffer_info = vzalloc_node(size, node);
355         if (!rx_ring->rx_buffer_info) {
356                 rx_ring->rx_buffer_info = vzalloc(size);
357                 if (!rx_ring->rx_buffer_info)
358                         return -ENOMEM;
359         }
360
361         size = sizeof(u16) * rx_ring->ring_size;
362         rx_ring->free_rx_ids = vzalloc_node(size, node);
363         if (!rx_ring->free_rx_ids) {
364                 rx_ring->free_rx_ids = vzalloc(size);
365                 if (!rx_ring->free_rx_ids) {
366                         vfree(rx_ring->rx_buffer_info);
367                         return -ENOMEM;
368                 }
369         }
370
371         /* Req id ring for receiving RX pkts out of order */
372         for (i = 0; i < rx_ring->ring_size; i++)
373                 rx_ring->free_rx_ids[i] = i;
374
375         /* Reset rx statistics */
376         memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
377
378         rx_ring->next_to_clean = 0;
379         rx_ring->next_to_use = 0;
380         rx_ring->cpu = ena_irq->cpu;
381
382         return 0;
383 }
384
385 /* ena_free_rx_resources - Free I/O Rx Resources
386  * @adapter: network interface device structure
387  * @qid: queue index
388  *
389  * Free all receive software resources
390  */
391 static void ena_free_rx_resources(struct ena_adapter *adapter,
392                                   u32 qid)
393 {
394         struct ena_ring *rx_ring = &adapter->rx_ring[qid];
395
396         vfree(rx_ring->rx_buffer_info);
397         rx_ring->rx_buffer_info = NULL;
398
399         vfree(rx_ring->free_rx_ids);
400         rx_ring->free_rx_ids = NULL;
401 }
402
403 /* ena_setup_all_rx_resources - allocate I/O Rx resources for all queues
404  * @adapter: board private structure
405  *
406  * Return 0 on success, negative on failure
407  */
408 static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
409 {
410         int i, rc = 0;
411
412         for (i = 0; i < adapter->num_queues; i++) {
413                 rc = ena_setup_rx_resources(adapter, i);
414                 if (rc)
415                         goto err_setup_rx;
416         }
417
418         return 0;
419
420 err_setup_rx:
421
422         netif_err(adapter, ifup, adapter->netdev,
423                   "Rx queue %d: allocation failed\n", i);
424
425         /* rewind the index freeing the rings as we go */
426         while (i--)
427                 ena_free_rx_resources(adapter, i);
428         return rc;
429 }
430
431 /* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
432  * @adapter: board private structure
433  *
434  * Free all receive software resources
435  */
436 static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
437 {
438         int i;
439
440         for (i = 0; i < adapter->num_queues; i++)
441                 ena_free_rx_resources(adapter, i);
442 }
443
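/* ena_alloc_rx_page - allocate and DMA-map a page for an Rx buffer
 * @rx_ring: Rx ring the buffer belongs to
 * @rx_info: Rx buffer descriptor to fill
 * @gfp: allocation flags
 *
 * Does nothing if a page is already attached to @rx_info. On success the
 * page and its DMA address are stored in @rx_info; allocation and mapping
 * failures are counted in the ring statistics.
 */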
444 static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
445                                     struct ena_rx_buffer *rx_info, gfp_t gfp)
446 {
447         struct ena_com_buf *ena_buf;
448         struct page *page;
449         dma_addr_t dma;
450
451         /* if the previously allocated page is not yet used */
452         if (unlikely(rx_info->page))
453                 return 0;
454
455         page = alloc_page(gfp);
456         if (unlikely(!page)) {
457                 u64_stats_update_begin(&rx_ring->syncp);
458                 rx_ring->rx_stats.page_alloc_fail++;
459                 u64_stats_update_end(&rx_ring->syncp);
460                 return -ENOMEM;
461         }
462
463         dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
464                            DMA_FROM_DEVICE);
465         if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
466                 u64_stats_update_begin(&rx_ring->syncp);
467                 rx_ring->rx_stats.dma_mapping_err++;
468                 u64_stats_update_end(&rx_ring->syncp);
469
470                 __free_page(page);
471                 return -EIO;
472         }
473         netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
474                   "alloc page %p, rx_info %p\n", page, rx_info);
475
476         rx_info->page = page;
477         rx_info->page_offset = 0;
478         ena_buf = &rx_info->ena_buf;
479         ena_buf->paddr = dma;
480         ena_buf->len = ENA_PAGE_SIZE;
481
482         return 0;
483 }
484
485 static void ena_free_rx_page(struct ena_ring *rx_ring,
486                              struct ena_rx_buffer *rx_info)
487 {
488         struct page *page = rx_info->page;
489         struct ena_com_buf *ena_buf = &rx_info->ena_buf;
490
491         if (unlikely(!page)) {
492                 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
493                            "Trying to free unallocated buffer\n");
494                 return;
495         }
496
497         dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
498                        DMA_FROM_DEVICE);
499
500         __free_page(page);
501         rx_info->page = NULL;
502 }
503
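/* ena_refill_rx_bufs - post new Rx buffers to the device
 * @rx_ring: Rx ring to refill
 * @num: number of buffers to add
 *
 * Allocate pages, add them to the submission queue and ring the doorbell
 * once at least one buffer was posted.
 * Return the number of buffers actually posted, which may be less than
 * @num if an allocation or descriptor error occurs.
 */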
504 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
505 {
506         u16 next_to_use, req_id;
507         u32 i;
508         int rc;
509
510         next_to_use = rx_ring->next_to_use;
511
512         for (i = 0; i < num; i++) {
513                 struct ena_rx_buffer *rx_info;
514
515                 req_id = rx_ring->free_rx_ids[next_to_use];
516                 rc = validate_rx_req_id(rx_ring, req_id);
517                 if (unlikely(rc < 0))
518                         break;
519
520                 rx_info = &rx_ring->rx_buffer_info[req_id];
521
522
523                 rc = ena_alloc_rx_page(rx_ring, rx_info,
524                                        GFP_ATOMIC | __GFP_COMP);
525                 if (unlikely(rc < 0)) {
526                         netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
527                                    "failed to alloc buffer for rx queue %d\n",
528                                    rx_ring->qid);
529                         break;
530                 }
531                 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
532                                                 &rx_info->ena_buf,
533                                                 req_id);
534                 if (unlikely(rc)) {
535                         netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
536                                    "failed to add buffer for rx queue %d\n",
537                                    rx_ring->qid);
538                         break;
539                 }
540                 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
541                                                    rx_ring->ring_size);
542         }
543
544         if (unlikely(i < num)) {
545                 u64_stats_update_begin(&rx_ring->syncp);
546                 rx_ring->rx_stats.refil_partial++;
547                 u64_stats_update_end(&rx_ring->syncp);
548                 netdev_warn(rx_ring->netdev,
549                             "refilled rx qid %d with only %d buffers (from %d)\n",
550                             rx_ring->qid, i, num);
551         }
552
553         /* ena_com_write_sq_doorbell issues a wmb() */
554         if (likely(i))
555                 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
556
557         rx_ring->next_to_use = next_to_use;
558
559         return i;
560 }
561
562 static void ena_free_rx_bufs(struct ena_adapter *adapter,
563                              u32 qid)
564 {
565         struct ena_ring *rx_ring = &adapter->rx_ring[qid];
566         u32 i;
567
568         for (i = 0; i < rx_ring->ring_size; i++) {
569                 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
570
571                 if (rx_info->page)
572                         ena_free_rx_page(rx_ring, rx_info);
573         }
574 }
575
576 /* ena_refill_all_rx_bufs - refill the Rx buffers of all queues
577  * @adapter: board private structure
578  */
580 static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
581 {
582         struct ena_ring *rx_ring;
583         int i, rc, bufs_num;
584
585         for (i = 0; i < adapter->num_queues; i++) {
586                 rx_ring = &adapter->rx_ring[i];
587                 bufs_num = rx_ring->ring_size - 1;
588                 rc = ena_refill_rx_bufs(rx_ring, bufs_num);
589
590                 if (unlikely(rc != bufs_num))
591                         netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
592                                    "refilling Queue %d failed. allocated %d buffers from: %d\n",
593                                    i, rc, bufs_num);
594         }
595 }
596
597 static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
598 {
599         int i;
600
601         for (i = 0; i < adapter->num_queues; i++)
602                 ena_free_rx_bufs(adapter, i);
603 }
604
605 /* ena_free_tx_bufs - Free Tx Buffers per Queue
606  * @tx_ring: TX ring whose buffers are to be freed
607  */
608 static void ena_free_tx_bufs(struct ena_ring *tx_ring)
609 {
610         bool print_once = true;
611         u32 i;
612
613         for (i = 0; i < tx_ring->ring_size; i++) {
614                 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
615                 struct ena_com_buf *ena_buf;
616                 int nr_frags;
617                 int j;
618
619                 if (!tx_info->skb)
620                         continue;
621
622                 if (print_once) {
623                         netdev_notice(tx_ring->netdev,
624                                       "free uncompleted tx skb qid %d idx 0x%x\n",
625                                       tx_ring->qid, i);
626                         print_once = false;
627                 } else {
628                         netdev_dbg(tx_ring->netdev,
629                                    "free uncompleted tx skb qid %d idx 0x%x\n",
630                                    tx_ring->qid, i);
631                 }
632
633                 ena_buf = tx_info->bufs;
634                 dma_unmap_single(tx_ring->dev,
635                                  ena_buf->paddr,
636                                  ena_buf->len,
637                                  DMA_TO_DEVICE);
638
639                 /* unmap remaining mapped pages */
640                 nr_frags = tx_info->num_of_bufs - 1;
641                 for (j = 0; j < nr_frags; j++) {
642                         ena_buf++;
643                         dma_unmap_page(tx_ring->dev,
644                                        ena_buf->paddr,
645                                        ena_buf->len,
646                                        DMA_TO_DEVICE);
647                 }
648
649                 dev_kfree_skb_any(tx_info->skb);
650         }
651         netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
652                                                   tx_ring->qid));
653 }
654
655 static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
656 {
657         struct ena_ring *tx_ring;
658         int i;
659
660         for (i = 0; i < adapter->num_queues; i++) {
661                 tx_ring = &adapter->tx_ring[i];
662                 ena_free_tx_bufs(tx_ring);
663         }
664 }
665
666 static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
667 {
668         u16 ena_qid;
669         int i;
670
671         for (i = 0; i < adapter->num_queues; i++) {
672                 ena_qid = ENA_IO_TXQ_IDX(i);
673                 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
674         }
675 }
676
677 static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
678 {
679         u16 ena_qid;
680         int i;
681
682         for (i = 0; i < adapter->num_queues; i++) {
683                 ena_qid = ENA_IO_RXQ_IDX(i);
684                 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
685         }
686 }
687
688 static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
689 {
690         ena_destroy_all_tx_queues(adapter);
691         ena_destroy_all_rx_queues(adapter);
692 }
693
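/* validate_tx_req_id - sanity check a Tx completion request id
 * @tx_ring: Tx ring the request id refers to
 * @req_id: request id reported by the device
 *
 * Return 0 when the id is in range and points at an in-flight skb;
 * otherwise count the bad_req_id error, schedule a device reset and
 * return -EFAULT.
 */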
694 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
695 {
696         struct ena_tx_buffer *tx_info = NULL;
697
698         if (likely(req_id < tx_ring->ring_size)) {
699                 tx_info = &tx_ring->tx_buffer_info[req_id];
700                 if (likely(tx_info->skb))
701                         return 0;
702         }
703
704         if (tx_info)
705                 netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
706                           "tx_info doesn't have valid skb\n");
707         else
708                 netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
709                           "Invalid req_id: %hu\n", req_id);
710
711         u64_stats_update_begin(&tx_ring->syncp);
712         tx_ring->tx_stats.bad_req_id++;
713         u64_stats_update_end(&tx_ring->syncp);
714
715         /* Trigger device reset */
716         tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
717         set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
718         return -EFAULT;
719 }
720
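/* ena_clean_tx_irq - reclaim completed Tx descriptors
 * @tx_ring: Tx ring to clean
 * @budget: maximum number of packets to process
 *
 * Unmap and free completed skbs, update BQL accounting and wake the
 * netdev queue when enough descriptors become available again.
 * Return the number of packets cleaned.
 */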
721 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
722 {
723         struct netdev_queue *txq;
724         bool above_thresh;
725         u32 tx_bytes = 0;
726         u32 total_done = 0;
727         u16 next_to_clean;
728         u16 req_id;
729         int tx_pkts = 0;
730         int rc;
731
732         next_to_clean = tx_ring->next_to_clean;
733         txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);
734
735         while (tx_pkts < budget) {
736                 struct ena_tx_buffer *tx_info;
737                 struct sk_buff *skb;
738                 struct ena_com_buf *ena_buf;
739                 int i, nr_frags;
740
741                 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
742                                                 &req_id);
743                 if (rc)
744                         break;
745
746                 rc = validate_tx_req_id(tx_ring, req_id);
747                 if (rc)
748                         break;
749
750                 tx_info = &tx_ring->tx_buffer_info[req_id];
751                 skb = tx_info->skb;
752
753                 /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
754                 prefetch(&skb->end);
755
756                 tx_info->skb = NULL;
757                 tx_info->last_jiffies = 0;
758
759                 if (likely(tx_info->num_of_bufs != 0)) {
760                         ena_buf = tx_info->bufs;
761
762                         dma_unmap_single(tx_ring->dev,
763                                          dma_unmap_addr(ena_buf, paddr),
764                                          dma_unmap_len(ena_buf, len),
765                                          DMA_TO_DEVICE);
766
767                         /* unmap remaining mapped pages */
768                         nr_frags = tx_info->num_of_bufs - 1;
769                         for (i = 0; i < nr_frags; i++) {
770                                 ena_buf++;
771                                 dma_unmap_page(tx_ring->dev,
772                                                dma_unmap_addr(ena_buf, paddr),
773                                                dma_unmap_len(ena_buf, len),
774                                                DMA_TO_DEVICE);
775                         }
776                 }
777
778                 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
779                           "tx_poll: q %d skb %p completed\n", tx_ring->qid,
780                           skb);
781
782                 tx_bytes += skb->len;
783                 dev_kfree_skb(skb);
784                 tx_pkts++;
785                 total_done += tx_info->tx_descs;
786
787                 tx_ring->free_tx_ids[next_to_clean] = req_id;
788                 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
789                                                      tx_ring->ring_size);
790         }
791
792         tx_ring->next_to_clean = next_to_clean;
793         ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
794         ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
795
796         netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
797
798         netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
799                   "tx_poll: q %d done. total pkts: %d\n",
800                   tx_ring->qid, tx_pkts);
801
802         /* need to make the ring's circular update visible to
803          * ena_start_xmit() before checking for netif_queue_stopped().
804          */
805         smp_mb();
806
807         above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
808                 ENA_TX_WAKEUP_THRESH;
809         if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
810                 __netif_tx_lock(txq, smp_processor_id());
811                 above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
812                         ENA_TX_WAKEUP_THRESH;
813                 if (netif_tx_queue_stopped(txq) && above_thresh) {
814                         netif_tx_wake_queue(txq);
815                         u64_stats_update_begin(&tx_ring->syncp);
816                         tx_ring->tx_stats.queue_wakeup++;
817                         u64_stats_update_end(&tx_ring->syncp);
818                 }
819                 __netif_tx_unlock(txq);
820         }
821
822         tx_ring->per_napi_bytes += tx_bytes;
823         tx_ring->per_napi_packets += tx_pkts;
824
825         return tx_pkts;
826 }
827
828 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
829 {
830         struct sk_buff *skb;
831
832         if (frags)
833                 skb = napi_get_frags(rx_ring->napi);
834         else
835                 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
836                                                 rx_ring->rx_copybreak);
837
838         if (unlikely(!skb)) {
839                 u64_stats_update_begin(&rx_ring->syncp);
840                 rx_ring->rx_stats.skb_alloc_fail++;
841                 u64_stats_update_end(&rx_ring->syncp);
842                 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
843                           "Failed to allocate skb. frags: %d\n", frags);
844                 return NULL;
845         }
846
847         return skb;
848 }
849
850 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
851                                   struct ena_com_rx_buf_info *ena_bufs,
852                                   u32 descs,
853                                   u16 *next_to_clean)
854 {
855         struct sk_buff *skb;
856         struct ena_rx_buffer *rx_info;
857         u16 len, req_id, buf = 0;
858         void *va;
859
860         len = ena_bufs[buf].len;
861         req_id = ena_bufs[buf].req_id;
862         rx_info = &rx_ring->rx_buffer_info[req_id];
863
864         if (unlikely(!rx_info->page)) {
865                 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
866                           "Page is NULL\n");
867                 return NULL;
868         }
869
870         netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
871                   "rx_info %p page %p\n",
872                   rx_info, rx_info->page);
873
874         /* save virt address of first buffer */
875         va = page_address(rx_info->page) + rx_info->page_offset;
876         prefetch(va + NET_IP_ALIGN);
877
878         if (len <= rx_ring->rx_copybreak) {
879                 skb = ena_alloc_skb(rx_ring, false);
880                 if (unlikely(!skb))
881                         return NULL;
882
883                 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
884                           "rx allocated small packet. len %d. data_len %d\n",
885                           skb->len, skb->data_len);
886
887                 /* sync this buffer for CPU use */
888                 dma_sync_single_for_cpu(rx_ring->dev,
889                                         dma_unmap_addr(&rx_info->ena_buf, paddr),
890                                         len,
891                                         DMA_FROM_DEVICE);
892                 skb_copy_to_linear_data(skb, va, len);
893                 dma_sync_single_for_device(rx_ring->dev,
894                                            dma_unmap_addr(&rx_info->ena_buf, paddr),
895                                            len,
896                                            DMA_FROM_DEVICE);
897
898                 skb_put(skb, len);
899                 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
900                 rx_ring->free_rx_ids[*next_to_clean] = req_id;
901                 *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
902                                                      rx_ring->ring_size);
903                 return skb;
904         }
905
906         skb = ena_alloc_skb(rx_ring, true);
907         if (unlikely(!skb))
908                 return NULL;
909
910         do {
911                 dma_unmap_page(rx_ring->dev,
912                                dma_unmap_addr(&rx_info->ena_buf, paddr),
913                                ENA_PAGE_SIZE, DMA_FROM_DEVICE);
914
915                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
916                                 rx_info->page_offset, len, ENA_PAGE_SIZE);
917
918                 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
919                           "rx skb updated. len %d. data_len %d\n",
920                           skb->len, skb->data_len);
921
922                 rx_info->page = NULL;
923
924                 rx_ring->free_rx_ids[*next_to_clean] = req_id;
925                 *next_to_clean =
926                         ENA_RX_RING_IDX_NEXT(*next_to_clean,
927                                              rx_ring->ring_size);
928                 if (likely(--descs == 0))
929                         break;
930
931                 buf++;
932                 len = ena_bufs[buf].len;
933                 req_id = ena_bufs[buf].req_id;
934                 rx_info = &rx_ring->rx_buffer_info[req_id];
935         } while (1);
936
937         return skb;
938 }
939
940 /* ena_rx_checksum - indicate in skb if hw indicated a good cksum
941  * @rx_ring: ring that received the packet
942  * @ena_rx_ctx: received packet context/metadata
943  * @skb: skb currently being received and modified
944  */
945 static inline void ena_rx_checksum(struct ena_ring *rx_ring,
946                                    struct ena_com_rx_ctx *ena_rx_ctx,
947                                    struct sk_buff *skb)
948 {
949         /* Rx csum disabled */
950         if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
951                 skb->ip_summed = CHECKSUM_NONE;
952                 return;
953         }
954
955         /* For fragmented packets the checksum isn't valid */
956         if (ena_rx_ctx->frag) {
957                 skb->ip_summed = CHECKSUM_NONE;
958                 return;
959         }
960
961         /* if IP and error */
962         if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
963                      (ena_rx_ctx->l3_csum_err))) {
964                 /* ipv4 checksum error */
965                 skb->ip_summed = CHECKSUM_NONE;
966                 u64_stats_update_begin(&rx_ring->syncp);
967                 rx_ring->rx_stats.bad_csum++;
968                 u64_stats_update_end(&rx_ring->syncp);
969                 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
970                           "RX IPv4 header checksum error\n");
971                 return;
972         }
973
974         /* if TCP/UDP */
975         if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
976                    (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
977                 if (unlikely(ena_rx_ctx->l4_csum_err)) {
978                         /* TCP/UDP checksum error */
979                         u64_stats_update_begin(&rx_ring->syncp);
980                         rx_ring->rx_stats.bad_csum++;
981                         u64_stats_update_end(&rx_ring->syncp);
982                         netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
983                                   "RX L4 checksum error\n");
984                         skb->ip_summed = CHECKSUM_NONE;
985                         return;
986                 }
987
988                 skb->ip_summed = CHECKSUM_UNNECESSARY;
989         }
990 }
991
992 static void ena_set_rx_hash(struct ena_ring *rx_ring,
993                             struct ena_com_rx_ctx *ena_rx_ctx,
994                             struct sk_buff *skb)
995 {
996         enum pkt_hash_types hash_type;
997
998         if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
999                 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1000                            (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
1001
1002                         hash_type = PKT_HASH_TYPE_L4;
1003                 else
1004                         hash_type = PKT_HASH_TYPE_NONE;
1005
1006                 /* Override hash type if the packet is fragmented */
1007                 if (ena_rx_ctx->frag)
1008                         hash_type = PKT_HASH_TYPE_NONE;
1009
1010                 skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
1011         }
1012 }
1013
1014 /* ena_clean_rx_irq - Cleanup RX irq
1015  * @rx_ring: RX ring to clean
1016  * @napi: napi handler
1017  * @budget: how many packets driver is allowed to clean
1018  *
1019  * Returns the number of cleaned buffers.
1020  */
1021 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
1022                             u32 budget)
1023 {
1024         u16 next_to_clean = rx_ring->next_to_clean;
1025         u32 res_budget, work_done;
1026
1027         struct ena_com_rx_ctx ena_rx_ctx;
1028         struct ena_adapter *adapter;
1029         struct sk_buff *skb;
1030         int refill_required;
1031         int refill_threshold;
1032         int rc = 0;
1033         int total_len = 0;
1034         int rx_copybreak_pkt = 0;
1035         int i;
1036
1037         netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1038                   "%s qid %d\n", __func__, rx_ring->qid);
1039         res_budget = budget;
1040
1041         do {
1042                 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
1043                 ena_rx_ctx.max_bufs = rx_ring->sgl_size;
1044                 ena_rx_ctx.descs = 0;
1045                 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
1046                                     rx_ring->ena_com_io_sq,
1047                                     &ena_rx_ctx);
1048                 if (unlikely(rc))
1049                         goto error;
1050
1051                 if (unlikely(ena_rx_ctx.descs == 0))
1052                         break;
1053
1054                 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1055                           "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
1056                           rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
1057                           ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
1058
1059                 /* allocate skb and fill it */
1060                 skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
1061                                  &next_to_clean);
1062
1063                 /* exit if we failed to retrieve a buffer */
1064                 if (unlikely(!skb)) {
1065                         for (i = 0; i < ena_rx_ctx.descs; i++) {
1066                                 rx_ring->free_rx_ids[next_to_clean] =
1067                                         rx_ring->ena_bufs[i].req_id;
1068                                 next_to_clean =
1069                                         ENA_RX_RING_IDX_NEXT(next_to_clean,
1070                                                              rx_ring->ring_size);
1071                         }
1072                         break;
1073                 }
1074
1075                 ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);
1076
1077                 ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);
1078
1079                 skb_record_rx_queue(skb, rx_ring->qid);
1080
1081                 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
1082                         total_len += rx_ring->ena_bufs[0].len;
1083                         rx_copybreak_pkt++;
1084                         napi_gro_receive(napi, skb);
1085                 } else {
1086                         total_len += skb->len;
1087                         napi_gro_frags(napi);
1088                 }
1089
1090                 res_budget--;
1091         } while (likely(res_budget));
1092
1093         work_done = budget - res_budget;
1094         rx_ring->per_napi_bytes += total_len;
1095         rx_ring->per_napi_packets += work_done;
1096         u64_stats_update_begin(&rx_ring->syncp);
1097         rx_ring->rx_stats.bytes += total_len;
1098         rx_ring->rx_stats.cnt += work_done;
1099         rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
1100         u64_stats_update_end(&rx_ring->syncp);
1101
1102         rx_ring->next_to_clean = next_to_clean;
1103
1104         refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
1105         refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;
1106
1107         /* Optimization, try to batch new rx buffers */
1108         if (refill_required > refill_threshold) {
1109                 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
1110                 ena_refill_rx_bufs(rx_ring, refill_required);
1111         }
1112
1113         return work_done;
1114
1115 error:
1116         adapter = netdev_priv(rx_ring->netdev);
1117
1118         u64_stats_update_begin(&rx_ring->syncp);
1119         rx_ring->rx_stats.bad_desc_num++;
1120         u64_stats_update_end(&rx_ring->syncp);
1121
1122         /* Too many desc from the device. Trigger reset */
1123         adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
1124         set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
1125
1126         return 0;
1127 }
1128
1129 inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
1130                                        struct ena_ring *tx_ring)
1131 {
1132         /* We apply adaptive moderation on Rx path only.
1133          * Tx uses static interrupt moderation.
1134          */
1135         ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
1136                                           rx_ring->per_napi_packets,
1137                                           rx_ring->per_napi_bytes,
1138                                           &rx_ring->smoothed_interval,
1139                                           &rx_ring->moder_tbl_idx);
1140
1141         /* Reset per napi packets/bytes */
1142         tx_ring->per_napi_packets = 0;
1143         tx_ring->per_napi_bytes = 0;
1144         rx_ring->per_napi_packets = 0;
1145         rx_ring->per_napi_bytes = 0;
1146 }
1147
1148 static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
1149                                         struct ena_ring *rx_ring)
1150 {
1151         struct ena_eth_io_intr_reg intr_reg;
1152
1153         /* Update intr register: rx intr delay,
1154          * tx intr delay and interrupt unmask
1155          */
1156         ena_com_update_intr_reg(&intr_reg,
1157                                 rx_ring->smoothed_interval,
1158                                 tx_ring->smoothed_interval,
1159                                 true);
1160
1161         /* It is a shared MSI-X vector.
1162          * The Tx and Rx CQs both have a pointer to it,
1163          * so we use one of them to reach the intr reg.
1164          */
1165         ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
1166 }
1167
1168 static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
1169                                              struct ena_ring *rx_ring)
1170 {
1171         int cpu = get_cpu();
1172         int numa_node;
1173
1174         /* Check only one ring since the 2 rings are running on the same cpu */
1175         if (likely(tx_ring->cpu == cpu))
1176                 goto out;
1177
1178         numa_node = cpu_to_node(cpu);
1179         put_cpu();
1180
1181         if (numa_node != NUMA_NO_NODE) {
1182                 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
1183                 ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
1184         }
1185
1186         tx_ring->cpu = cpu;
1187         rx_ring->cpu = cpu;
1188
1189         return;
1190 out:
1191         put_cpu();
1192 }
1193
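/* ena_io_poll - NAPI poll handler shared by the Tx and Rx rings of a queue
 * @napi: napi context
 * @budget: Rx packet budget
 *
 * Clean the Tx and Rx rings. When all work fits within the budget,
 * complete NAPI, optionally adjust adaptive interrupt moderation and
 * unmask the shared interrupt vector. Return the Rx work done, @budget
 * if more work remains, or 0 if the device is down or resetting.
 */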
1194 static int ena_io_poll(struct napi_struct *napi, int budget)
1195 {
1196         struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
1197         struct ena_ring *tx_ring, *rx_ring;
1198
1199         u32 tx_work_done;
1200         u32 rx_work_done;
1201         int tx_budget;
1202         int napi_comp_call = 0;
1203         int ret;
1204
1205         tx_ring = ena_napi->tx_ring;
1206         rx_ring = ena_napi->rx_ring;
1207
1208         tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
1209
1210         if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1211             test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
1212                 napi_complete_done(napi, 0);
1213                 return 0;
1214         }
1215
1216         tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
1217         rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
1218
1219         /* If the device is about to reset or is down, avoid unmasking
1220          * the interrupt and return 0 so NAPI won't reschedule
1221          */
1222         if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1223                      test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
1224                 napi_complete_done(napi, 0);
1225                 ret = 0;
1226
1227         } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
1228                 napi_comp_call = 1;
1229
1230                 /* Update numa and unmask the interrupt only when scheduled
1231                  * from the interrupt context (vs from sk_busy_loop)
1232                  */
1233                 if (napi_complete_done(napi, rx_work_done)) {
1234                         /* Tx and Rx share the same interrupt vector */
1235                         if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
1236                                 ena_adjust_intr_moderation(rx_ring, tx_ring);
1237
1238                         ena_unmask_interrupt(tx_ring, rx_ring);
1239                 }
1240
1241                 ena_update_ring_numa_node(tx_ring, rx_ring);
1242
1243                 ret = rx_work_done;
1244         } else {
1245                 ret = budget;
1246         }
1247
1248         u64_stats_update_begin(&tx_ring->syncp);
1249         tx_ring->tx_stats.napi_comp += napi_comp_call;
1250         tx_ring->tx_stats.tx_poll++;
1251         u64_stats_update_end(&tx_ring->syncp);
1252
1253         return ret;
1254 }
1255
1256 static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
1257 {
1258         struct ena_adapter *adapter = (struct ena_adapter *)data;
1259
1260         ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1261
1262         /* Don't call the aenq handler before probe is done */
1263         if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
1264                 ena_com_aenq_intr_handler(adapter->ena_dev, data);
1265
1266         return IRQ_HANDLED;
1267 }
1268
1269 /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
1270  * @irq: interrupt number
1271  * @data: pointer to the ena_napi structure of the queue
1272  */
1273 static irqreturn_t ena_intr_msix_io(int irq, void *data)
1274 {
1275         struct ena_napi *ena_napi = data;
1276
1277         ena_napi->tx_ring->first_interrupt = true;
1278         ena_napi->rx_ring->first_interrupt = true;
1279
1280         napi_schedule_irqoff(&ena_napi->napi);
1281
1282         return IRQ_HANDLED;
1283 }
1284
1285 /* Reserve a single MSI-X vector for management (admin + aenq),
1286  * plus one vector for each potential io queue.
1287  * The number of potential io queues is the minimum of what the device
1288  * supports and the number of vCPUs.
1289  */
1290 static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
1291 {
1292         int msix_vecs, irq_cnt;
1293
1294         if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1295                 netif_err(adapter, probe, adapter->netdev,
1296                           "Error, MSI-X is already enabled\n");
1297                 return -EPERM;
1298         }
1299
1300         /* Reserve the maximum number of MSI-X vectors we might need */
1301         msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
1302
1303         netif_dbg(adapter, probe, adapter->netdev,
1304                   "trying to enable MSI-X, vectors %d\n", msix_vecs);
1305
1306         irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
1307                                         msix_vecs, PCI_IRQ_MSIX);
1308
1309         if (irq_cnt < 0) {
1310                 netif_err(adapter, probe, adapter->netdev,
1311                           "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
1312                 return -ENOSPC;
1313         }
1314
1315         if (irq_cnt != msix_vecs) {
1316                 netif_notice(adapter, probe, adapter->netdev,
1317                              "enable only %d MSI-X (out of %d), reduce the number of queues\n",
1318                              irq_cnt, msix_vecs);
1319                 adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
1320         }
1321
1322         if (ena_init_rx_cpu_rmap(adapter))
1323                 netif_warn(adapter, probe, adapter->netdev,
1324                            "Failed to map IRQs to CPUs\n");
1325
1326         adapter->msix_vecs = irq_cnt;
1327         set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
1328
1329         return 0;
1330 }
1331
1332 static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
1333 {
1334         u32 cpu;
1335
1336         snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
1337                  ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
1338                  pci_name(adapter->pdev));
1339         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
1340                 ena_intr_msix_mgmnt;
1341         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
1342         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
1343                 pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
1344         cpu = cpumask_first(cpu_online_mask);
1345         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
1346         cpumask_set_cpu(cpu,
1347                         &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
1348 }
1349
1350 static void ena_setup_io_intr(struct ena_adapter *adapter)
1351 {
1352         struct net_device *netdev;
1353         int irq_idx, i, cpu;
1354
1355         netdev = adapter->netdev;
1356
1357         for (i = 0; i < adapter->num_queues; i++) {
1358                 irq_idx = ENA_IO_IRQ_IDX(i);
1359                 cpu = i % num_online_cpus();
1360
1361                 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
1362                          "%s-Tx-Rx-%d", netdev->name, i);
1363                 adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
1364                 adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
1365                 adapter->irq_tbl[irq_idx].vector =
1366                         pci_irq_vector(adapter->pdev, irq_idx);
1367                 adapter->irq_tbl[irq_idx].cpu = cpu;
1368
1369                 cpumask_set_cpu(cpu,
1370                                 &adapter->irq_tbl[irq_idx].affinity_hint_mask);
1371         }
1372 }
1373
1374 static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
1375 {
1376         unsigned long flags = 0;
1377         struct ena_irq *irq;
1378         int rc;
1379
1380         irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1381         rc = request_irq(irq->vector, irq->handler, flags, irq->name,
1382                          irq->data);
1383         if (rc) {
1384                 netif_err(adapter, probe, adapter->netdev,
1385                           "failed to request admin irq\n");
1386                 return rc;
1387         }
1388
1389         netif_dbg(adapter, probe, adapter->netdev,
1390                   "set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n",
1391                   irq->affinity_hint_mask.bits[0], irq->vector);
1392
1393         irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
1394
1395         return rc;
1396 }
1397
1398 static int ena_request_io_irq(struct ena_adapter *adapter)
1399 {
1400         unsigned long flags = 0;
1401         struct ena_irq *irq;
1402         int rc = 0, i, k;
1403
1404         if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1405                 netif_err(adapter, ifup, adapter->netdev,
1406                           "Failed to request I/O IRQ: MSI-X is not enabled\n");
1407                 return -EINVAL;
1408         }
1409
1410         for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1411                 irq = &adapter->irq_tbl[i];
1412                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
1413                                  irq->data);
1414                 if (rc) {
1415                         netif_err(adapter, ifup, adapter->netdev,
1416                                   "Failed to request I/O IRQ. index %d rc %d\n",
1417                                    i, rc);
1418                         goto err;
1419                 }
1420
1421                 netif_dbg(adapter, ifup, adapter->netdev,
1422                           "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
1423                           i, irq->affinity_hint_mask.bits[0], irq->vector);
1424
1425                 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
1426         }
1427
1428         return rc;
1429
1430 err:
1431         for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
1432                 irq = &adapter->irq_tbl[k];
1433                 free_irq(irq->vector, irq->data);
1434         }
1435
1436         return rc;
1437 }
1438
1439 static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
1440 {
1441         struct ena_irq *irq;
1442
1443         irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1444         synchronize_irq(irq->vector);
1445         irq_set_affinity_hint(irq->vector, NULL);
1446         free_irq(irq->vector, irq->data);
1447 }
1448
1449 static void ena_free_io_irq(struct ena_adapter *adapter)
1450 {
1451         struct ena_irq *irq;
1452         int i;
1453
1454 #ifdef CONFIG_RFS_ACCEL
1455         if (adapter->msix_vecs >= 1) {
1456                 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
1457                 adapter->netdev->rx_cpu_rmap = NULL;
1458         }
1459 #endif /* CONFIG_RFS_ACCEL */
1460
1461         for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1462                 irq = &adapter->irq_tbl[i];
1463                 irq_set_affinity_hint(irq->vector, NULL);
1464                 free_irq(irq->vector, irq->data);
1465         }
1466 }
1467
1468 static void ena_disable_msix(struct ena_adapter *adapter)
1469 {
1470         if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
1471                 pci_free_irq_vectors(adapter->pdev);
1472 }
1473
1474 static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
1475 {
1476         int i;
1477
1478         if (!netif_running(adapter->netdev))
1479                 return;
1480
1481         for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
1482                 synchronize_irq(adapter->irq_tbl[i].vector);
1483 }
1484
1485 static void ena_del_napi(struct ena_adapter *adapter)
1486 {
1487         int i;
1488
1489         for (i = 0; i < adapter->num_queues; i++)
1490                 netif_napi_del(&adapter->ena_napi[i].napi);
1491 }
1492
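/* ena_init_napi - register a NAPI context per I/O queue pair
 * @adapter: ENA private structure
 *
 * Each NAPI instance polls one Tx ring and one Rx ring via ena_io_poll()
 * with a budget of ENA_NAPI_BUDGET.
 */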
1493 static void ena_init_napi(struct ena_adapter *adapter)
1494 {
1495         struct ena_napi *napi;
1496         int i;
1497
1498         for (i = 0; i < adapter->num_queues; i++) {
1499                 napi = &adapter->ena_napi[i];
1500
1501                 netif_napi_add(adapter->netdev,
1502                                &adapter->ena_napi[i].napi,
1503                                ena_io_poll,
1504                                ENA_NAPI_BUDGET);
1505                 napi->rx_ring = &adapter->rx_ring[i];
1506                 napi->tx_ring = &adapter->tx_ring[i];
1507                 napi->qid = i;
1508         }
1509 }
1510
1511 static void ena_napi_disable_all(struct ena_adapter *adapter)
1512 {
1513         int i;
1514
1515         for (i = 0; i < adapter->num_queues; i++)
1516                 napi_disable(&adapter->ena_napi[i].napi);
1517 }
1518
1519 static void ena_napi_enable_all(struct ena_adapter *adapter)
1520 {
1521         int i;
1522
1523         for (i = 0; i < adapter->num_queues; i++)
1524                 napi_enable(&adapter->ena_napi[i].napi);
1525 }
1526
1527 static void ena_restore_ethtool_params(struct ena_adapter *adapter)
1528 {
1529         adapter->tx_usecs = 0;
1530         adapter->rx_usecs = 0;
1531         adapter->tx_frames = 1;
1532         adapter->rx_frames = 1;
1533 }
1534
1535 /* Configure the RX forwarding (RSS) */
1536 static int ena_rss_configure(struct ena_adapter *adapter)
1537 {
1538         struct ena_com_dev *ena_dev = adapter->ena_dev;
1539         int rc;
1540
1541         /* In case the RSS table wasn't initialized by probe */
1542         if (!ena_dev->rss.tbl_log_size) {
1543                 rc = ena_rss_init_default(adapter);
1544                 if (rc && (rc != -EOPNOTSUPP)) {
1545                         netif_err(adapter, ifup, adapter->netdev,
1546                                   "Failed to init RSS rc: %d\n", rc);
1547                         return rc;
1548                 }
1549         }
1550
1551         /* Set indirect table */
1552         rc = ena_com_indirect_table_set(ena_dev);
1553         if (unlikely(rc && rc != -EOPNOTSUPP))
1554                 return rc;
1555
1556         /* Configure hash function (if supported) */
1557         rc = ena_com_set_hash_function(ena_dev);
1558         if (unlikely(rc && (rc != -EOPNOTSUPP)))
1559                 return rc;
1560
1561         /* Configure hash inputs (if supported) */
1562         rc = ena_com_set_hash_ctrl(ena_dev);
1563         if (unlikely(rc && (rc != -EOPNOTSUPP)))
1564                 return rc;
1565
1566         return 0;
1567 }
1568
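/* ena_up_complete - finish bringing the interface up
 * @adapter: ENA private structure
 *
 * Configures RSS, registers NAPI, applies the current MTU, refills the
 * Rx rings, starts the Tx queues and enables NAPI polling.
 */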
1569 static int ena_up_complete(struct ena_adapter *adapter)
1570 {
1571         int rc;
1572
1573         rc = ena_rss_configure(adapter);
1574         if (rc)
1575                 return rc;
1576
1577         ena_init_napi(adapter);
1578
1579         ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
1580
1581         ena_refill_all_rx_bufs(adapter);
1582
1583         /* enable transmits */
1584         netif_tx_start_all_queues(adapter->netdev);
1585
1586         ena_restore_ethtool_params(adapter);
1587
1588         ena_napi_enable_all(adapter);
1589
1590         return 0;
1591 }
1592
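/* ena_create_io_tx_queue - create a single Tx queue on the device
 * @adapter: ENA private structure
 * @qid: index of the queue to create
 *
 * Asks the device (via ena_com_create_io_queue()) to create the I/O SQ/CQ
 * pair for this Tx queue and retrieves the queue handlers. The completion
 * queue is bound to the NUMA node of the CPU that services the queue.
 */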
1593 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
1594 {
1595         struct ena_com_create_io_ctx ctx = { 0 };
1596         struct ena_com_dev *ena_dev;
1597         struct ena_ring *tx_ring;
1598         u32 msix_vector;
1599         u16 ena_qid;
1600         int rc;
1601
1602         ena_dev = adapter->ena_dev;
1603
1604         tx_ring = &adapter->tx_ring[qid];
1605         msix_vector = ENA_IO_IRQ_IDX(qid);
1606         ena_qid = ENA_IO_TXQ_IDX(qid);
1607
1608         ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1609         ctx.qid = ena_qid;
1610         ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1611         ctx.msix_vector = msix_vector;
1612         ctx.queue_size = adapter->tx_ring_size;
1613         ctx.numa_node = cpu_to_node(tx_ring->cpu);
1614
1615         rc = ena_com_create_io_queue(ena_dev, &ctx);
1616         if (rc) {
1617                 netif_err(adapter, ifup, adapter->netdev,
1618                           "Failed to create I/O TX queue num %d rc: %d\n",
1619                           qid, rc);
1620                 return rc;
1621         }
1622
1623         rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1624                                      &tx_ring->ena_com_io_sq,
1625                                      &tx_ring->ena_com_io_cq);
1626         if (rc) {
1627                 netif_err(adapter, ifup, adapter->netdev,
1628                           "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
1629                           qid, rc);
1630                 ena_com_destroy_io_queue(ena_dev, ena_qid);
1631                 return rc;
1632         }
1633
1634         ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
1635         return rc;
1636 }
1637
1638 static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
1639 {
1640         struct ena_com_dev *ena_dev = adapter->ena_dev;
1641         int rc, i;
1642
1643         for (i = 0; i < adapter->num_queues; i++) {
1644                 rc = ena_create_io_tx_queue(adapter, i);
1645                 if (rc)
1646                         goto create_err;
1647         }
1648
1649         return 0;
1650
1651 create_err:
1652         while (i--)
1653                 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
1654
1655         return rc;
1656 }
1657
1658 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
1659 {
1660         struct ena_com_dev *ena_dev;
1661         struct ena_com_create_io_ctx ctx = { 0 };
1662         struct ena_ring *rx_ring;
1663         u32 msix_vector;
1664         u16 ena_qid;
1665         int rc;
1666
1667         ena_dev = adapter->ena_dev;
1668
1669         rx_ring = &adapter->rx_ring[qid];
1670         msix_vector = ENA_IO_IRQ_IDX(qid);
1671         ena_qid = ENA_IO_RXQ_IDX(qid);
1672
1673         ctx.qid = ena_qid;
1674         ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1675         ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1676         ctx.msix_vector = msix_vector;
1677         ctx.queue_size = adapter->rx_ring_size;
1678         ctx.numa_node = cpu_to_node(rx_ring->cpu);
1679
1680         rc = ena_com_create_io_queue(ena_dev, &ctx);
1681         if (rc) {
1682                 netif_err(adapter, ifup, adapter->netdev,
1683                           "Failed to create I/O RX queue num %d rc: %d\n",
1684                           qid, rc);
1685                 return rc;
1686         }
1687
1688         rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1689                                      &rx_ring->ena_com_io_sq,
1690                                      &rx_ring->ena_com_io_cq);
1691         if (rc) {
1692                 netif_err(adapter, ifup, adapter->netdev,
1693                           "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
1694                           qid, rc);
1695                 ena_com_destroy_io_queue(ena_dev, ena_qid);
1696                 return rc;
1697         }
1698
1699         ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
1700
1701         return rc;
1702 }
1703
1704 static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
1705 {
1706         struct ena_com_dev *ena_dev = adapter->ena_dev;
1707         int rc, i;
1708
1709         for (i = 0; i < adapter->num_queues; i++) {
1710                 rc = ena_create_io_rx_queue(adapter, i);
1711                 if (rc)
1712                         goto create_err;
1713         }
1714
1715         return 0;
1716
1717 create_err:
1718         while (i--)
1719                 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
1720
1721         return rc;
1722 }
1723
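/* ena_up - bring the data path up
 * @adapter: ENA private structure
 *
 * Rough bring-up order (as implemented below):
 *
 *   ena_setup_io_intr()            - map queues to MSI-X vectors
 *   ena_request_io_irq()           - request the I/O IRQs
 *   ena_setup_all_tx_resources()   - allocate Tx descriptor resources
 *   ena_setup_all_rx_resources()   - allocate Rx descriptor resources
 *   ena_create_all_io_tx_queues()  - create Tx queues on the device
 *   ena_create_all_io_rx_queues()  - create Rx queues on the device
 *   ena_up_complete()              - RSS, NAPI, Rx refill, start Tx
 *
 * On any failure the steps already taken are unwound in reverse order.
 */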
1724 static int ena_up(struct ena_adapter *adapter)
1725 {
1726         int rc, i;
1727
1728         netdev_dbg(adapter->netdev, "%s\n", __func__);
1729
1730         ena_setup_io_intr(adapter);
1731
1732         rc = ena_request_io_irq(adapter);
1733         if (rc)
1734                 goto err_req_irq;
1735
1736         /* allocate transmit descriptors */
1737         rc = ena_setup_all_tx_resources(adapter);
1738         if (rc)
1739                 goto err_setup_tx;
1740
1741         /* allocate receive descriptors */
1742         rc = ena_setup_all_rx_resources(adapter);
1743         if (rc)
1744                 goto err_setup_rx;
1745
1746         /* Create TX queues */
1747         rc = ena_create_all_io_tx_queues(adapter);
1748         if (rc)
1749                 goto err_create_tx_queues;
1750
1751         /* Create RX queues */
1752         rc = ena_create_all_io_rx_queues(adapter);
1753         if (rc)
1754                 goto err_create_rx_queues;
1755
1756         rc = ena_up_complete(adapter);
1757         if (rc)
1758                 goto err_up;
1759
1760         if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
1761                 netif_carrier_on(adapter->netdev);
1762
1763         u64_stats_update_begin(&adapter->syncp);
1764         adapter->dev_stats.interface_up++;
1765         u64_stats_update_end(&adapter->syncp);
1766
1767         set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
1768
1769         /* Enable completion queues interrupt */
1770         for (i = 0; i < adapter->num_queues; i++)
1771                 ena_unmask_interrupt(&adapter->tx_ring[i],
1772                                      &adapter->rx_ring[i]);
1773
1774         /* Schedule NAPI in case we had pending packets
1775          * from the last time NAPI was disabled.
1776          */
1777         for (i = 0; i < adapter->num_queues; i++)
1778                 napi_schedule(&adapter->ena_napi[i].napi);
1779
1780         return rc;
1781
1782 err_up:
1783         ena_destroy_all_rx_queues(adapter);
1784 err_create_rx_queues:
1785         ena_destroy_all_tx_queues(adapter);
1786 err_create_tx_queues:
1787         ena_free_all_io_rx_resources(adapter);
1788 err_setup_rx:
1789         ena_free_all_io_tx_resources(adapter);
1790 err_setup_tx:
1791         ena_free_io_irq(adapter);
1792 err_req_irq:
1793
1794         return rc;
1795 }
1796
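/* ena_down - bring the data path down
 * @adapter: ENA private structure
 *
 * Reverses ena_up(): stops the Tx queues, disables NAPI, resets the device
 * if a reset was requested, destroys the I/O queues and frees the IRQs,
 * buffers and ring resources.
 */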
1797 static void ena_down(struct ena_adapter *adapter)
1798 {
1799         netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
1800
1801         clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
1802
1803         u64_stats_update_begin(&adapter->syncp);
1804         adapter->dev_stats.interface_down++;
1805         u64_stats_update_end(&adapter->syncp);
1806
1807         netif_carrier_off(adapter->netdev);
1808         netif_tx_disable(adapter->netdev);
1809
1810         /* After this point the napi handler won't enable the tx queue */
1811         ena_napi_disable_all(adapter);
1812
1813         /* After the queues are destroyed there won't be any new interrupts */
1814
1815         if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
1816                 int rc;
1817
1818                 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
1819                 if (rc)
1820                         dev_err(&adapter->pdev->dev, "Device reset failed\n");
1821         }
1822
1823         ena_destroy_all_io_queues(adapter);
1824
1825         ena_disable_io_intr_sync(adapter);
1826         ena_free_io_irq(adapter);
1827         ena_del_napi(adapter);
1828
1829         ena_free_all_tx_bufs(adapter);
1830         ena_free_all_rx_bufs(adapter);
1831         ena_free_all_io_tx_resources(adapter);
1832         ena_free_all_io_rx_resources(adapter);
1833 }
1834
1835 /* ena_open - Called when a network interface is made active
1836  * @netdev: network interface device structure
1837  *
1838  * Returns 0 on success, negative value on failure
1839  *
1840  * The open entry point is called when a network interface is made
1841  * active by the system (IFF_UP).  At this point all resources needed
1842  * for transmit and receive operations are allocated, the interrupt
1843  * handler is registered with the OS, the watchdog timer is started,
1844  * and the stack is notified that the interface is ready.
1845  */
1846 static int ena_open(struct net_device *netdev)
1847 {
1848         struct ena_adapter *adapter = netdev_priv(netdev);
1849         int rc;
1850
1851         /* Notify the stack of the actual queue counts. */
1852         rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
1853         if (rc) {
1854                 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
1855                 return rc;
1856         }
1857
1858         rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
1859         if (rc) {
1860                 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
1861                 return rc;
1862         }
1863
1864         rc = ena_up(adapter);
1865         if (rc)
1866                 return rc;
1867
1868         return rc;
1869 }
1870
1871 /* ena_close - Disables a network interface
1872  * @netdev: network interface device structure
1873  *
1874  * Returns 0, this is not allowed to fail
1875  *
1876  * The close entry point is called when an interface is de-activated
1877  * by the OS.  The hardware is still under the drivers control, but
1878  * needs to be disabled.  A global MAC reset is issued to stop the
1879  * hardware, and all transmit and receive resources are freed.
1880  */
1881 static int ena_close(struct net_device *netdev)
1882 {
1883         struct ena_adapter *adapter = netdev_priv(netdev);
1884
1885         netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
1886
1887         if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
1888                 ena_down(adapter);
1889
1890         /* Check the device status and issue a reset if needed */
1891         check_for_admin_com_state(adapter);
1892         if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
1893                 netif_err(adapter, ifdown, adapter->netdev,
1894                           "Destroy failure, restarting device\n");
1895                 ena_dump_stats_to_dmesg(adapter);
1896                 /* rtnl lock already obtained in dev_ioctl() layer */
1897                 ena_destroy_device(adapter, false);
1898                 ena_restore_device(adapter);
1899         }
1900
1901         return 0;
1902 }
1903
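/* ena_tx_csum - fill checksum/TSO offload metadata for a Tx packet
 * @ena_tx_ctx: Tx context handed to ena_com_prepare_tx()
 * @skb: the packet being transmitted
 *
 * For CHECKSUM_PARTIAL or GSO packets this sets the L3/L4 protocol fields,
 * header lengths and MSS so the device can perform checksum/TSO offload;
 * otherwise the metadata is marked as not valid.
 */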
1904 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
1905 {
1906         u32 mss = skb_shinfo(skb)->gso_size;
1907         struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
1908         u8 l4_protocol = 0;
1909
1910         if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
1911                 ena_tx_ctx->l4_csum_enable = 1;
1912                 if (mss) {
1913                         ena_tx_ctx->tso_enable = 1;
1914                         ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
1915                         ena_tx_ctx->l4_csum_partial = 0;
1916                 } else {
1917                         ena_tx_ctx->tso_enable = 0;
1918                         ena_meta->l4_hdr_len = 0;
1919                         ena_tx_ctx->l4_csum_partial = 1;
1920                 }
1921
1922                 switch (ip_hdr(skb)->version) {
1923                 case IPVERSION:
1924                         ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
1925                         if (ip_hdr(skb)->frag_off & htons(IP_DF))
1926                                 ena_tx_ctx->df = 1;
1927                         if (mss)
1928                                 ena_tx_ctx->l3_csum_enable = 1;
1929                         l4_protocol = ip_hdr(skb)->protocol;
1930                         break;
1931                 case 6:
1932                         ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
1933                         l4_protocol = ipv6_hdr(skb)->nexthdr;
1934                         break;
1935                 default:
1936                         break;
1937                 }
1938
1939                 if (l4_protocol == IPPROTO_TCP)
1940                         ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
1941                 else
1942                         ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
1943
1944                 ena_meta->mss = mss;
1945                 ena_meta->l3_hdr_len = skb_network_header_len(skb);
1946                 ena_meta->l3_hdr_offset = skb_network_offset(skb);
1947                 ena_tx_ctx->meta_valid = 1;
1948
1949         } else {
1950                 ena_tx_ctx->meta_valid = 0;
1951         }
1952 }
1953
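/* ena_check_and_linearize_skb - linearize an skb with too many fragments
 * @tx_ring: destination Tx ring
 * @skb: the packet being transmitted
 *
 * If the number of fragments exceeds what fits in the ring's SG list, the
 * skb is linearized (copied into a single buffer) and the event is counted
 * in the ring statistics.
 */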
1954 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
1955                                        struct sk_buff *skb)
1956 {
1957         int num_frags, header_len, rc;
1958
1959         num_frags = skb_shinfo(skb)->nr_frags;
1960         header_len = skb_headlen(skb);
1961
1962         if (num_frags < tx_ring->sgl_size)
1963                 return 0;
1964
1965         if ((num_frags == tx_ring->sgl_size) &&
1966             (header_len < tx_ring->tx_max_header_size))
1967                 return 0;
1968
1969         u64_stats_update_begin(&tx_ring->syncp);
1970         tx_ring->tx_stats.linearize++;
1971         u64_stats_update_end(&tx_ring->syncp);
1972
1973         rc = skb_linearize(skb);
1974         if (unlikely(rc)) {
1975                 u64_stats_update_begin(&tx_ring->syncp);
1976                 tx_ring->tx_stats.linearize_failed++;
1977                 u64_stats_update_end(&tx_ring->syncp);
1978         }
1979
1980         return rc;
1981 }
1982
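/* ena_start_xmit - transmit an skb
 * @skb: packet to send
 * @dev: network interface
 *
 * Maps the skb head and fragments for DMA, builds the Tx context (including
 * the pushed header in LLQ mode), hands the descriptors to the device and
 * rings the doorbell when xmit_more batching ends or the queue is stopped.
 */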
1983 /* Called with netif_tx_lock. */
1984 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
1985 {
1986         struct ena_adapter *adapter = netdev_priv(dev);
1987         struct ena_tx_buffer *tx_info;
1988         struct ena_com_tx_ctx ena_tx_ctx;
1989         struct ena_ring *tx_ring;
1990         struct netdev_queue *txq;
1991         struct ena_com_buf *ena_buf;
1992         void *push_hdr;
1993         u32 len, last_frag;
1994         u16 next_to_use;
1995         u16 req_id;
1996         u16 push_len;
1997         u16 header_len;
1998         dma_addr_t dma;
1999         int qid, rc, nb_hw_desc;
2000         int i = -1;
2001
2002         netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2003         /* Determine which tx ring we will be placed on */
2004         qid = skb_get_queue_mapping(skb);
2005         tx_ring = &adapter->tx_ring[qid];
2006         txq = netdev_get_tx_queue(dev, qid);
2007
2008         rc = ena_check_and_linearize_skb(tx_ring, skb);
2009         if (unlikely(rc))
2010                 goto error_drop_packet;
2011
2012         skb_tx_timestamp(skb);
2013         len = skb_headlen(skb);
2014
2015         next_to_use = tx_ring->next_to_use;
2016         req_id = tx_ring->free_tx_ids[next_to_use];
2017         tx_info = &tx_ring->tx_buffer_info[req_id];
2018         tx_info->num_of_bufs = 0;
2019
2020         WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
2021         ena_buf = tx_info->bufs;
2022         tx_info->skb = skb;
2023
2024         if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2025                 /* prepare the push buffer */
2026                 push_len = min_t(u32, len, tx_ring->tx_max_header_size);
2027                 header_len = push_len;
2028                 push_hdr = skb->data;
2029         } else {
2030                 push_len = 0;
2031                 header_len = min_t(u32, len, tx_ring->tx_max_header_size);
2032                 push_hdr = NULL;
2033         }
2034
2035         netif_dbg(adapter, tx_queued, dev,
2036                   "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
2037                   push_hdr, push_len);
2038
2039         if (len > push_len) {
2040                 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
2041                                      len - push_len, DMA_TO_DEVICE);
2042                 if (dma_mapping_error(tx_ring->dev, dma))
2043                         goto error_report_dma_error;
2044
2045                 ena_buf->paddr = dma;
2046                 ena_buf->len = len - push_len;
2047
2048                 ena_buf++;
2049                 tx_info->num_of_bufs++;
2050         }
2051
2052         last_frag = skb_shinfo(skb)->nr_frags;
2053
2054         for (i = 0; i < last_frag; i++) {
2055                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2056
2057                 len = skb_frag_size(frag);
2058                 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
2059                                        DMA_TO_DEVICE);
2060                 if (dma_mapping_error(tx_ring->dev, dma))
2061                         goto error_report_dma_error;
2062
2063                 ena_buf->paddr = dma;
2064                 ena_buf->len = len;
2065                 ena_buf++;
2066         }
2067
2068         tx_info->num_of_bufs += last_frag;
2069
2070         memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2071         ena_tx_ctx.ena_bufs = tx_info->bufs;
2072         ena_tx_ctx.push_header = push_hdr;
2073         ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2074         ena_tx_ctx.req_id = req_id;
2075         ena_tx_ctx.header_len = header_len;
2076
2077         /* set flags and meta data */
2078         ena_tx_csum(&ena_tx_ctx, skb);
2079
2080         /* prepare the packet's descriptors to dma engine */
2081         rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
2082                                 &nb_hw_desc);
2083
2084         if (unlikely(rc)) {
2085                 netif_err(adapter, tx_queued, dev,
2086                           "failed to prepare tx bufs\n");
2087                 u64_stats_update_begin(&tx_ring->syncp);
2088                 tx_ring->tx_stats.queue_stop++;
2089                 tx_ring->tx_stats.prepare_ctx_err++;
2090                 u64_stats_update_end(&tx_ring->syncp);
2091                 netif_tx_stop_queue(txq);
2092                 goto error_unmap_dma;
2093         }
2094
2095         netdev_tx_sent_queue(txq, skb->len);
2096
2097         u64_stats_update_begin(&tx_ring->syncp);
2098         tx_ring->tx_stats.cnt++;
2099         tx_ring->tx_stats.bytes += skb->len;
2100         u64_stats_update_end(&tx_ring->syncp);
2101
2102         tx_info->tx_descs = nb_hw_desc;
2103         tx_info->last_jiffies = jiffies;
2104         tx_info->print_once = 0;
2105
2106         tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
2107                 tx_ring->ring_size);
2108
2109         /* Stop the queue when no more space is available. The packet can need up
2110          * to sgl_size + 2 descriptors: one for the meta descriptor and one for the
2111          * header (if the header is larger than tx_max_header_size).
2112          */
2113         if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
2114                      (tx_ring->sgl_size + 2))) {
2115                 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
2116                           __func__, qid);
2117
2118                 netif_tx_stop_queue(txq);
2119                 u64_stats_update_begin(&tx_ring->syncp);
2120                 tx_ring->tx_stats.queue_stop++;
2121                 u64_stats_update_end(&tx_ring->syncp);
2122
2123                 /* There is a rare condition where this function decides to
2124                  * stop the queue but meanwhile clean_tx_irq updates
2125                  * next_to_completion and terminates.
2126                  * The queue will remain stopped forever.
2127                  * To solve this issue add a memory barrier to make sure that the
2128                  * netif_tx_stop_queue() write is visible before checking if
2129                  * there is additional space in the queue.
2130                  */
2131                 smp_mb();
2132
2133                 if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
2134                                 > ENA_TX_WAKEUP_THRESH) {
2135                         netif_tx_wake_queue(txq);
2136                         u64_stats_update_begin(&tx_ring->syncp);
2137                         tx_ring->tx_stats.queue_wakeup++;
2138                         u64_stats_update_end(&tx_ring->syncp);
2139                 }
2140         }
2141
2142         if (netif_xmit_stopped(txq) || !skb->xmit_more) {
2143                 /* trigger the dma engine. ena_com_write_sq_doorbell()
2144                  * has a memory barrier
2145                  */
2146                 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
2147                 u64_stats_update_begin(&tx_ring->syncp);
2148                 tx_ring->tx_stats.doorbells++;
2149                 u64_stats_update_end(&tx_ring->syncp);
2150         }
2151
2152         return NETDEV_TX_OK;
2153
2154 error_report_dma_error:
2155         u64_stats_update_begin(&tx_ring->syncp);
2156         tx_ring->tx_stats.dma_mapping_err++;
2157         u64_stats_update_end(&tx_ring->syncp);
2158         netdev_warn(adapter->netdev, "failed to map skb\n");
2159
2160         tx_info->skb = NULL;
2161
2162 error_unmap_dma:
2163         if (i >= 0) {
2164                 /* save value of frag that failed */
2165                 last_frag = i;
2166
2167                 /* start back at beginning and unmap skb */
2168                 tx_info->skb = NULL;
2169                 ena_buf = tx_info->bufs;
2170                 dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
2171                                  dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
2172
2173                 /* unmap remaining mapped pages */
2174                 for (i = 0; i < last_frag; i++) {
2175                         ena_buf++;
2176                         dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
2177                                        dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
2178                 }
2179         }
2180
2181 error_drop_packet:
2182
2183         dev_kfree_skb(skb);
2184         return NETDEV_TX_OK;
2185 }
2186
2187 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
2188                             struct net_device *sb_dev,
2189                             select_queue_fallback_t fallback)
2190 {
2191         u16 qid;
2192         /* We suspect that this is good for in-kernel network services that
2193          * want to loop incoming skb rx to tx in normal user generated traffic;
2194          * most probably we will not get to this code path.
2195          */
2196         if (skb_rx_queue_recorded(skb))
2197                 qid = skb_get_rx_queue(skb);
2198         else
2199                 qid = fallback(dev, skb, NULL);
2200
2201         return qid;
2202 }
2203
2204 static void ena_config_host_info(struct ena_com_dev *ena_dev,
2205                                  struct pci_dev *pdev)
2206 {
2207         struct ena_admin_host_info *host_info;
2208         int rc;
2209
2210         /* Allocate only the host info */
2211         rc = ena_com_allocate_host_info(ena_dev);
2212         if (rc) {
2213                 pr_err("Cannot allocate host info\n");
2214                 return;
2215         }
2216
2217         host_info = ena_dev->host_attr.host_info;
2218
2219         host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
2220         host_info->os_type = ENA_ADMIN_OS_LINUX;
2221         host_info->kernel_ver = LINUX_VERSION_CODE;
2222         strncpy(host_info->kernel_ver_str, utsname()->version,
2223                 sizeof(host_info->kernel_ver_str) - 1);
2224         host_info->os_dist = 0;
2225         strncpy(host_info->os_dist_str, utsname()->release,
2226                 sizeof(host_info->os_dist_str) - 1);
2227         host_info->driver_version =
2228                 (DRV_MODULE_VER_MAJOR) |
2229                 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2230                 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
2231                 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
2232         host_info->num_cpus = num_online_cpus();
2233
2234         rc = ena_com_set_host_attributes(ena_dev);
2235         if (rc) {
2236                 if (rc == -EOPNOTSUPP)
2237                         pr_warn("Cannot set host attributes\n");
2238                 else
2239                         pr_err("Cannot set host attributes\n");
2240
2241                 goto err;
2242         }
2243
2244         return;
2245
2246 err:
2247         ena_com_delete_host_info(ena_dev);
2248 }
2249
2250 static void ena_config_debug_area(struct ena_adapter *adapter)
2251 {
2252         u32 debug_area_size;
2253         int rc, ss_count;
2254
2255         ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
2256         if (ss_count <= 0) {
2257                 netif_err(adapter, drv, adapter->netdev,
2258                           "SS count is not positive\n");
2259                 return;
2260         }
2261
2262         /* allocate 32 bytes for each string and 64 bits for the value */
2263         debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
2264
2265         rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
2266         if (rc) {
2267                 pr_err("Cannot allocate debug area\n");
2268                 return;
2269         }
2270
2271         rc = ena_com_set_host_attributes(adapter->ena_dev);
2272         if (rc) {
2273                 if (rc == -EOPNOTSUPP)
2274                         netif_warn(adapter, drv, adapter->netdev,
2275                                    "Cannot set host attributes\n");
2276                 else
2277                         netif_err(adapter, drv, adapter->netdev,
2278                                   "Cannot set host attributes\n");
2279                 goto err;
2280         }
2281
2282         return;
2283 err:
2284         ena_com_delete_debug_area(adapter->ena_dev);
2285 }
2286
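/* ena_get_stats64 - .ndo_get_stats64 handler
 * @netdev: network interface
 * @stats: structure filled with the aggregated counters
 *
 * Sums the per-ring Tx/Rx packet and byte counters using the u64_stats
 * begin/retry protocol and reports the device-wide rx_drops count. These
 * are the counters surfaced by tools such as "ip -s link".
 */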
2287 static void ena_get_stats64(struct net_device *netdev,
2288                             struct rtnl_link_stats64 *stats)
2289 {
2290         struct ena_adapter *adapter = netdev_priv(netdev);
2291         struct ena_ring *rx_ring, *tx_ring;
2292         unsigned int start;
2293         u64 rx_drops;
2294         int i;
2295
2296         if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2297                 return;
2298
2299         for (i = 0; i < adapter->num_queues; i++) {
2300                 u64 bytes, packets;
2301
2302                 tx_ring = &adapter->tx_ring[i];
2303
2304                 do {
2305                         start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
2306                         packets = tx_ring->tx_stats.cnt;
2307                         bytes = tx_ring->tx_stats.bytes;
2308                 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
2309
2310                 stats->tx_packets += packets;
2311                 stats->tx_bytes += bytes;
2312
2313                 rx_ring = &adapter->rx_ring[i];
2314
2315                 do {
2316                         start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
2317                         packets = rx_ring->rx_stats.cnt;
2318                         bytes = rx_ring->rx_stats.bytes;
2319                 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
2320
2321                 stats->rx_packets += packets;
2322                 stats->rx_bytes += bytes;
2323         }
2324
2325         do {
2326                 start = u64_stats_fetch_begin_irq(&adapter->syncp);
2327                 rx_drops = adapter->dev_stats.rx_drops;
2328         } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
2329
2330         stats->rx_dropped = rx_drops;
2331
2332         stats->multicast = 0;
2333         stats->collisions = 0;
2334
2335         stats->rx_length_errors = 0;
2336         stats->rx_crc_errors = 0;
2337         stats->rx_frame_errors = 0;
2338         stats->rx_fifo_errors = 0;
2339         stats->rx_missed_errors = 0;
2340         stats->tx_window_errors = 0;
2341
2342         stats->rx_errors = 0;
2343         stats->tx_errors = 0;
2344 }
2345
2346 static const struct net_device_ops ena_netdev_ops = {
2347         .ndo_open               = ena_open,
2348         .ndo_stop               = ena_close,
2349         .ndo_start_xmit         = ena_start_xmit,
2350         .ndo_select_queue       = ena_select_queue,
2351         .ndo_get_stats64        = ena_get_stats64,
2352         .ndo_tx_timeout         = ena_tx_timeout,
2353         .ndo_change_mtu         = ena_change_mtu,
2354         .ndo_set_mac_address    = NULL,
2355         .ndo_validate_addr      = eth_validate_addr,
2356 };
2357
2358 static int ena_device_validate_params(struct ena_adapter *adapter,
2359                                       struct ena_com_dev_get_features_ctx *get_feat_ctx)
2360 {
2361         struct net_device *netdev = adapter->netdev;
2362         int rc;
2363
2364         rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
2365                               adapter->mac_addr);
2366         if (!rc) {
2367                 netif_err(adapter, drv, netdev,
2368                           "Error, mac addresses are different\n");
2369                 return -EINVAL;
2370         }
2371
2372         if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) ||
2373             (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) {
2374                 netif_err(adapter, drv, netdev,
2375                           "Error, device doesn't support enough queues\n");
2376                 return -EINVAL;
2377         }
2378
2379         if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
2380                 netif_err(adapter, drv, netdev,
2381                           "Error, device max mtu is smaller than netdev MTU\n");
2382                 return -EINVAL;
2383         }
2384
2385         return 0;
2386 }
2387
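/* ena_device_init - basic device and admin-queue initialization
 * @ena_dev: ENA communication layer context
 * @pdev: PCI device
 * @get_feat_ctx: filled with the device feature/attribute set
 * @wd_state: set to true if the keep-alive AENQ group is supported
 *
 * Resets the device, validates the device version, configures the DMA
 * masks, brings up the admin queue (in polling mode) and enables the
 * supported AENQ groups.
 */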
2388 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
2389                            struct ena_com_dev_get_features_ctx *get_feat_ctx,
2390                            bool *wd_state)
2391 {
2392         struct device *dev = &pdev->dev;
2393         bool readless_supported;
2394         u32 aenq_groups;
2395         int dma_width;
2396         int rc;
2397
2398         rc = ena_com_mmio_reg_read_request_init(ena_dev);
2399         if (rc) {
2400                 dev_err(dev, "failed to init mmio read less\n");
2401                 return rc;
2402         }
2403
2404         /* The PCIe configuration space revision id indicates whether mmio reg
2405          * read is disabled
2406          */
2407         readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
2408         ena_com_set_mmio_read_mode(ena_dev, readless_supported);
2409
2410         rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
2411         if (rc) {
2412                 dev_err(dev, "Can not reset device\n");
2413                 goto err_mmio_read_less;
2414         }
2415
2416         rc = ena_com_validate_version(ena_dev);
2417         if (rc) {
2418                 dev_err(dev, "device version is too low\n");
2419                 goto err_mmio_read_less;
2420         }
2421
2422         dma_width = ena_com_get_dma_width(ena_dev);
2423         if (dma_width < 0) {
2424                 dev_err(dev, "Invalid dma width value %d", dma_width);
2425                 rc = dma_width;
2426                 goto err_mmio_read_less;
2427         }
2428
2429         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
2430         if (rc) {
2431                 dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
2432                 goto err_mmio_read_less;
2433         }
2434
2435         rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
2436         if (rc) {
2437                 dev_err(dev, "pci_set_consistent_dma_mask failed 0x%x\n",
2438                         rc);
2439                 goto err_mmio_read_less;
2440         }
2441
2442         /* ENA admin level init */
2443         rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
2444         if (rc) {
2445                 dev_err(dev,
2446                         "Can not initialize ena admin queue with device\n");
2447                 goto err_mmio_read_less;
2448         }
2449
2450         /* To enable the msix interrupts the driver needs to know the number
2451          * of queues. So the driver uses polling mode to retrieve this
2452          * information
2453          */
2454         ena_com_set_admin_polling_mode(ena_dev, true);
2455
2456         ena_config_host_info(ena_dev, pdev);
2457
2458         /* Get Device Attributes */
2459         rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
2460         if (rc) {
2461                 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
2462                 goto err_admin_init;
2463         }
2464
2465         /* Try to turn on all the available aenq groups */
2466         aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
2467                 BIT(ENA_ADMIN_FATAL_ERROR) |
2468                 BIT(ENA_ADMIN_WARNING) |
2469                 BIT(ENA_ADMIN_NOTIFICATION) |
2470                 BIT(ENA_ADMIN_KEEP_ALIVE);
2471
2472         aenq_groups &= get_feat_ctx->aenq.supported_groups;
2473
2474         rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
2475         if (rc) {
2476                 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
2477                 goto err_admin_init;
2478         }
2479
2480         *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
2481
2482         return 0;
2483
2484 err_admin_init:
2485         ena_com_delete_host_info(ena_dev);
2486         ena_com_admin_destroy(ena_dev);
2487 err_mmio_read_less:
2488         ena_com_mmio_reg_read_request_destroy(ena_dev);
2489
2490         return rc;
2491 }
2492
2493 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
2494                                                     int io_vectors)
2495 {
2496         struct ena_com_dev *ena_dev = adapter->ena_dev;
2497         struct device *dev = &adapter->pdev->dev;
2498         int rc;
2499
2500         rc = ena_enable_msix(adapter, io_vectors);
2501         if (rc) {
2502                 dev_err(dev, "Can not reserve msix vectors\n");
2503                 return rc;
2504         }
2505
2506         ena_setup_mgmnt_intr(adapter);
2507
2508         rc = ena_request_mgmnt_irq(adapter);
2509         if (rc) {
2510                 dev_err(dev, "Can not setup management interrupts\n");
2511                 goto err_disable_msix;
2512         }
2513
2514         ena_com_set_admin_polling_mode(ena_dev, false);
2515
2516         ena_com_admin_aenq_enable(ena_dev);
2517
2518         return 0;
2519
2520 err_disable_msix:
2521         ena_disable_msix(adapter);
2522
2523         return rc;
2524 }
2525
2526 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
2527 {
2528         struct net_device *netdev = adapter->netdev;
2529         struct ena_com_dev *ena_dev = adapter->ena_dev;
2530         bool dev_up;
2531
2532         if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2533                 return;
2534
2535         netif_carrier_off(netdev);
2536
2537         del_timer_sync(&adapter->timer_service);
2538
2539         dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2540         adapter->dev_up_before_reset = dev_up;
2541
2542         if (!graceful)
2543                 ena_com_set_admin_running_state(ena_dev, false);
2544
2545         if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2546                 ena_down(adapter);
2547
2548         /* Before releasing the ENA resources, a device reset is required
2549          * (to prevent the device from accessing them).
2550          * In case the reset flag is set and the device is up, ena_down()
2551          * already performs the reset, so it can be skipped.
2552          */
2553         if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
2554                 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2555
2556         ena_free_mgmnt_irq(adapter);
2557
2558         ena_disable_msix(adapter);
2559
2560         ena_com_abort_admin_commands(ena_dev);
2561
2562         ena_com_wait_for_abort_completion(ena_dev);
2563
2564         ena_com_admin_destroy(ena_dev);
2565
2566         ena_com_mmio_reg_read_request_destroy(ena_dev);
2567
2568         adapter->reset_reason = ENA_REGS_RESET_NORMAL;
2569
2570         clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2571         clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2572 }
2573
2574 static int ena_restore_device(struct ena_adapter *adapter)
2575 {
2576         struct ena_com_dev_get_features_ctx get_feat_ctx;
2577         struct ena_com_dev *ena_dev = adapter->ena_dev;
2578         struct pci_dev *pdev = adapter->pdev;
2579         bool wd_state;
2580         int rc;
2581
2582         set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2583         rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
2584         if (rc) {
2585                 dev_err(&pdev->dev, "Can not initialize device\n");
2586                 goto err;
2587         }
2588         adapter->wd_state = wd_state;
2589
2590         rc = ena_device_validate_params(adapter, &get_feat_ctx);
2591         if (rc) {
2592                 dev_err(&pdev->dev, "Validation of device parameters failed\n");
2593                 goto err_device_destroy;
2594         }
2595
2596         clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2597         /* Make sure we don't have a race with the AENQ link state handler */
2598         if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2599                 netif_carrier_on(adapter->netdev);
2600
2601         rc = ena_enable_msix_and_set_admin_interrupts(adapter,
2602                                                       adapter->num_queues);
2603         if (rc) {
2604                 dev_err(&pdev->dev, "Enable MSI-X failed\n");
2605                 goto err_device_destroy;
2606         }
2607         /* If the interface was up before the reset, bring it up again */
2608         if (adapter->dev_up_before_reset) {
2609                 rc = ena_up(adapter);
2610                 if (rc) {
2611                         dev_err(&pdev->dev, "Failed to create I/O queues\n");
2612                         goto err_disable_msix;
2613                 }
2614         }
2615
2616         set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2617         mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
2618         dev_err(&pdev->dev, "Device reset completed successfully\n");
2619
2620         return rc;
2621 err_disable_msix:
2622         ena_free_mgmnt_irq(adapter);
2623         ena_disable_msix(adapter);
2624 err_device_destroy:
2625         ena_com_admin_destroy(ena_dev);
2626 err:
2627         clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2628         clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2629         dev_err(&pdev->dev,
2630                 "Reset attempt failed. Can not reset the device\n");
2631
2632         return rc;
2633 }
2634
2635 static void ena_fw_reset_device(struct work_struct *work)
2636 {
2637         struct ena_adapter *adapter =
2638                 container_of(work, struct ena_adapter, reset_task);
2639         struct pci_dev *pdev = adapter->pdev;
2640
2641         if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2642                 dev_err(&pdev->dev,
2643                         "device reset scheduled while reset bit is off\n");
2644                 return;
2645         }
2646         rtnl_lock();
2647         ena_destroy_device(adapter, false);
2648         ena_restore_device(adapter);
2649         rtnl_unlock();
2650 }
2651
2652 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
2653                                         struct ena_ring *rx_ring)
2654 {
2655         if (likely(rx_ring->first_interrupt))
2656                 return 0;
2657
2658         if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
2659                 return 0;
2660
2661         rx_ring->no_interrupt_event_cnt++;
2662
2663         if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
2664                 netif_err(adapter, rx_err, adapter->netdev,
2665                           "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
2666                           rx_ring->qid);
2667                 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
2668                 smp_mb__before_atomic();
2669                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2670                 return -EIO;
2671         }
2672
2673         return 0;
2674 }
2675
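/* check_missing_comp_in_tx_queue - watchdog for lost Tx completions
 * @adapter: ENA private structure
 * @tx_ring: Tx ring to inspect
 *
 * Walks the in-flight Tx buffers and triggers a device reset if an interrupt
 * was never received for the queue or if the number of Tx packets that missed
 * their completion timeout crosses the configured threshold.
 */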
2676 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
2677                                           struct ena_ring *tx_ring)
2678 {
2679         struct ena_tx_buffer *tx_buf;
2680         unsigned long last_jiffies;
2681         u32 missed_tx = 0;
2682         int i, rc = 0;
2683
2684         for (i = 0; i < tx_ring->ring_size; i++) {
2685                 tx_buf = &tx_ring->tx_buffer_info[i];
2686                 last_jiffies = tx_buf->last_jiffies;
2687
2688                 if (last_jiffies == 0)
2689                         /* no pending Tx at this location */
2690                         continue;
2691
2692                 if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
2693                              2 * adapter->missing_tx_completion_to))) {
2694                         /* If after the grace period the interrupt is still not
2695                          * received, we schedule a reset
2696                          */
2697                         netif_err(adapter, tx_err, adapter->netdev,
2698                                   "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
2699                                   tx_ring->qid);
2700                         adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
2701                         smp_mb__before_atomic();
2702                         set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2703                         return -EIO;
2704                 }
2705
2706                 if (unlikely(time_is_before_jiffies(last_jiffies +
2707                                 adapter->missing_tx_completion_to))) {
2708                         if (!tx_buf->print_once)
2709                                 netif_notice(adapter, tx_err, adapter->netdev,
2710                                              "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
2711                                              tx_ring->qid, i);
2712
2713                         tx_buf->print_once = 1;
2714                         missed_tx++;
2715                 }
2716         }
2717
2718         if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
2719                 netif_err(adapter, tx_err, adapter->netdev,
2720                           "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
2721                           missed_tx,
2722                           adapter->missing_tx_completion_threshold);
2723                 adapter->reset_reason =
2724                         ENA_REGS_RESET_MISS_TX_CMPL;
2725                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2726                 rc = -EIO;
2727         }
2728
2729         u64_stats_update_begin(&tx_ring->syncp);
2730         tx_ring->tx_stats.missed_tx = missed_tx;
2731         u64_stats_update_end(&tx_ring->syncp);
2732
2733         return rc;
2734 }
2735
2736 static void check_for_missing_completions(struct ena_adapter *adapter)
2737 {
2738         struct ena_ring *tx_ring;
2739         struct ena_ring *rx_ring;
2740         int i, budget, rc;
2741
2742         /* Make sure the driver doesn't turn the device off in another process */
2743         smp_rmb();
2744
2745         if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2746                 return;
2747
2748         if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2749                 return;
2750
2751         if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
2752                 return;
2753
2754         budget = ENA_MONITORED_TX_QUEUES;
2755
2756         for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
2757                 tx_ring = &adapter->tx_ring[i];
2758                 rx_ring = &adapter->rx_ring[i];
2759
2760                 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
2761                 if (unlikely(rc))
2762                         return;
2763
2764                 rc = check_for_rx_interrupt_queue(adapter, rx_ring);
2765                 if (unlikely(rc))
2766                         return;
2767
2768                 budget--;
2769                 if (!budget)
2770                         break;
2771         }
2772
2773         adapter->last_monitored_tx_qid = i % adapter->num_queues;
2774 }
2775
2776 /* trigger napi schedule after 2 consecutive detections */
2777 #define EMPTY_RX_REFILL 2
2778 /* For the rare case where the device runs out of Rx descriptors and the
2779  * napi handler failed to refill new Rx descriptors (due to a lack of memory
2780  * for example).
2781  * This case will lead to a deadlock:
2782  * The device won't send interrupts since all the new Rx packets will be dropped
2783  * The napi handler won't allocate new Rx descriptors so the device won't be
2784  * able to deliver new Rx packets to the host.
2785  *
2786  * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
2787  * It is recommended to have at least 512MB, with a minimum of 128MB for
2788  * constrained environments.
2789  *
2790  * When such a situation is detected - Reschedule napi
2791  */
2792 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
2793 {
2794         struct ena_ring *rx_ring;
2795         int i, refill_required;
2796
2797         if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2798                 return;
2799
2800         if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2801                 return;
2802
2803         for (i = 0; i < adapter->num_queues; i++) {
2804                 rx_ring = &adapter->rx_ring[i];
2805
2806                 refill_required =
2807                         ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
2808                 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
2809                         rx_ring->empty_rx_queue++;
2810
2811                         if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
2812                                 u64_stats_update_begin(&rx_ring->syncp);
2813                                 rx_ring->rx_stats.empty_rx_ring++;
2814                                 u64_stats_update_end(&rx_ring->syncp);
2815
2816                                 netif_err(adapter, drv, adapter->netdev,
2817                                           "trigger refill for ring %d\n", i);
2818
2819                                 napi_schedule(rx_ring->napi);
2820                                 rx_ring->empty_rx_queue = 0;
2821                         }
2822                 } else {
2823                         rx_ring->empty_rx_queue = 0;
2824                 }
2825         }
2826 }
2827
2828 /* Check for keep alive expiration */
2829 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
2830 {
2831         unsigned long keep_alive_expired;
2832
2833         if (!adapter->wd_state)
2834                 return;
2835
2836         if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2837                 return;
2838
2839         keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
2840                                            adapter->keep_alive_timeout);
2841         if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
2842                 netif_err(adapter, drv, adapter->netdev,
2843                           "Keep alive watchdog timeout.\n");
2844                 u64_stats_update_begin(&adapter->syncp);
2845                 adapter->dev_stats.wd_expired++;
2846                 u64_stats_update_end(&adapter->syncp);
2847                 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
2848                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2849         }
2850 }
2851
2852 static void check_for_admin_com_state(struct ena_adapter *adapter)
2853 {
2854         if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
2855                 netif_err(adapter, drv, adapter->netdev,
2856                           "ENA admin queue is not in running state!\n");
2857                 u64_stats_update_begin(&adapter->syncp);
2858                 adapter->dev_stats.admin_q_pause++;
2859                 u64_stats_update_end(&adapter->syncp);
2860                 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
2861                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2862         }
2863 }
2864
2865 static void ena_update_hints(struct ena_adapter *adapter,
2866                              struct ena_admin_ena_hw_hints *hints)
2867 {
2868         struct net_device *netdev = adapter->netdev;
2869
2870         if (hints->admin_completion_tx_timeout)
2871                 adapter->ena_dev->admin_queue.completion_timeout =
2872                         hints->admin_completion_tx_timeout * 1000;
2873
2874         if (hints->mmio_read_timeout)
2875                 /* convert to usec */
2876                 adapter->ena_dev->mmio_read.reg_read_to =
2877                         hints->mmio_read_timeout * 1000;
2878
2879         if (hints->missed_tx_completion_count_threshold_to_reset)
2880                 adapter->missing_tx_completion_threshold =
2881                         hints->missed_tx_completion_count_threshold_to_reset;
2882
2883         if (hints->missing_tx_completion_timeout) {
2884                 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2885                         adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
2886                 else
2887                         adapter->missing_tx_completion_to =
2888                                 msecs_to_jiffies(hints->missing_tx_completion_timeout);
2889         }
2890
2891         if (hints->netdev_wd_timeout)
2892                 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
2893
2894         if (hints->driver_watchdog_timeout) {
2895                 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2896                         adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2897                 else
2898                         adapter->keep_alive_timeout =
2899                                 msecs_to_jiffies(hints->driver_watchdog_timeout);
2900         }
2901 }
2902
2903 static void ena_update_host_info(struct ena_admin_host_info *host_info,
2904                                  struct net_device *netdev)
2905 {
2906         host_info->supported_network_features[0] =
2907                 netdev->features & GENMASK_ULL(31, 0);
2908         host_info->supported_network_features[1] =
2909                 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
2910 }
2911
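/* ena_timer_service - periodic (1 Hz) housekeeping timer
 * @t: timer that fired
 *
 * Runs the keep-alive, admin-queue, missing-completion and empty-Rx-ring
 * checks, refreshes the host info and debug area, and schedules reset_task
 * when the trigger-reset flag is set; otherwise it re-arms itself for the
 * next second.
 */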
2912 static void ena_timer_service(struct timer_list *t)
2913 {
2914         struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
2915         u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
2916         struct ena_admin_host_info *host_info =
2917                 adapter->ena_dev->host_attr.host_info;
2918
2919         check_for_missing_keep_alive(adapter);
2920
2921         check_for_admin_com_state(adapter);
2922
2923         check_for_missing_completions(adapter);
2924
2925         check_for_empty_rx_ring(adapter);
2926
2927         if (debug_area)
2928                 ena_dump_stats_to_buf(adapter, debug_area);
2929
2930         if (host_info)
2931                 ena_update_host_info(host_info, adapter->netdev);
2932
2933         if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2934                 netif_err(adapter, drv, adapter->netdev,
2935                           "Trigger reset is on\n");
2936                 ena_dump_stats_to_dmesg(adapter);
2937                 queue_work(ena_wq, &adapter->reset_task);
2938                 return;
2939         }
2940
2941         /* Reset the timer */
2942         mod_timer(&adapter->timer_service, jiffies + HZ);
2943 }
2944
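     /*
      * Compute the number of IO queues as the minimum of: online CPUs
      * (capped at ENA_MAX_NUM_IO_QUEUES), the SQs the device exposes (the
      * LLQ count when push mode is used, falling back to regular SQs if
      * that count is 0), the CQ count, and the MSI-X vectors minus the one
      * reserved for management.
      */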
2945 static int ena_calc_io_queue_num(struct pci_dev *pdev,
2946                                  struct ena_com_dev *ena_dev,
2947                                  struct ena_com_dev_get_features_ctx *get_feat_ctx)
2948 {
2949         int io_sq_num, io_queue_num;
2950
2951         /* In case of LLQ, use the llq number from the get feature cmd */
2952         if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2953                 io_sq_num = get_feat_ctx->max_queues.max_llq_num;
2954
2955                 if (io_sq_num == 0) {
2956                         dev_err(&pdev->dev,
2957                                 "Trying to use LLQ but llq_num is 0. Falling back to regular queues\n");
2958
2959                         ena_dev->tx_mem_queue_type =
2960                                 ENA_ADMIN_PLACEMENT_POLICY_HOST;
2961                         io_sq_num = get_feat_ctx->max_queues.max_sq_num;
2962                 }
2963         } else {
2964                 io_sq_num = get_feat_ctx->max_queues.max_sq_num;
2965         }
2966
2967         io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
2968         io_queue_num = min_t(int, io_queue_num, io_sq_num);
2969         io_queue_num = min_t(int, io_queue_num,
2970                              get_feat_ctx->max_queues.max_cq_num);
2971         /* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
2972         io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
2973         if (unlikely(!io_queue_num)) {
2974                 dev_err(&pdev->dev, "The device doesn't have io queues\n");
2975                 return -EFAULT;
2976         }
2977
2978         return io_queue_num;
2979 }
2980
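     /*
      * Choose the TX placement policy: push (LLQ) mode is enabled only when
      * the device exposes the LLQ memory BAR and reports a non-zero LLQ
      * count; otherwise TX descriptors stay in host memory.
      */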
2981 static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
2982                               struct ena_com_dev_get_features_ctx *get_feat_ctx)
2983 {
2984         bool has_mem_bar;
2985
2986         has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
2987
2988         /* Enable push mode if device supports LLQ */
2989         if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
2990                 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
2991         else
2992                 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2993 }
2994
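     /*
      * Translate the device's reported offload capabilities into netdev
      * feature flags (TX IPv4/IPv6 checksum, TSO variants, RX checksum);
      * NETIF_F_SG, NETIF_F_RXHASH and NETIF_F_HIGHDMA are always set, and
      * the result is mirrored into hw_features and vlan_features.
      */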
2995 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
2996                                  struct net_device *netdev)
2997 {
2998         netdev_features_t dev_features = 0;
2999
3000         /* Set offload features */
3001         if (feat->offload.tx &
3002                 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3003                 dev_features |= NETIF_F_IP_CSUM;
3004
3005         if (feat->offload.tx &
3006                 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
3007                 dev_features |= NETIF_F_IPV6_CSUM;
3008
3009         if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
3010                 dev_features |= NETIF_F_TSO;
3011
3012         if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
3013                 dev_features |= NETIF_F_TSO6;
3014
3015         if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
3016                 dev_features |= NETIF_F_TSO_ECN;
3017
3018         if (feat->offload.rx_supported &
3019                 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
3020                 dev_features |= NETIF_F_RXCSUM;
3021
3022         if (feat->offload.rx_supported &
3023                 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
3024                 dev_features |= NETIF_F_RXCSUM;
3025
3026         netdev->features =
3027                 dev_features |
3028                 NETIF_F_SG |
3029                 NETIF_F_RXHASH |
3030                 NETIF_F_HIGHDMA;
3031
3032         netdev->hw_features |= netdev->features;
3033         netdev->vlan_features |= netdev->features;
3034 }
3035
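     /*
      * Apply device-reported configuration to the netdev: use the device
      * MAC address when it is valid (otherwise generate a random one), set
      * the offload features, and bound the MTU by the device max_mtu and
      * ENA_MIN_MTU.
      */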
3036 static void ena_set_conf_feat_params(struct ena_adapter *adapter,
3037                                      struct ena_com_dev_get_features_ctx *feat)
3038 {
3039         struct net_device *netdev = adapter->netdev;
3040
3041         /* Copy mac address */
3042         if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
3043                 eth_hw_addr_random(netdev);
3044                 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
3045         } else {
3046                 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
3047                 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3048         }
3049
3050         /* Set offload features */
3051         ena_set_dev_offloads(feat, netdev);
3052
3053         adapter->max_mtu = feat->dev_attr.max_mtu;
3054         netdev->max_mtu = adapter->max_mtu;
3055         netdev->min_mtu = ENA_MIN_MTU;
3056 }
3057
3058 static int ena_rss_init_default(struct ena_adapter *adapter)