// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40evf.h"
#include "i40e_prototype.h"
#include "i40evf_client.h"
/* All i40evf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
static int i40evf_close(struct net_device *netdev);

char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
	"Intel(R) 40-10 Gigabit Virtual Function Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 3
#define DRV_VERSION_MINOR 2
#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD) \
	     DRV_KERN
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
	"Copyright (c) 2013 - 2015 Intel Corporation.";

/* i40evf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40evf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40evf_wq;

/**
 * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
				      struct i40e_dma_mem *mem,
				      u64 size, u32 alignment)
{
	struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;

	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return I40E_ERR_NO_MEMORY;
}

/**
 * i40evf_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;

	if (!mem || !mem->va)
		return I40E_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
				       struct i40e_virt_mem *mem, u32 size)
{
	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return I40E_ERR_NO_MEMORY;
}

/**
 * i40evf_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
				   struct i40e_virt_mem *mem)
{
	if (!mem)
		return I40E_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}

/**
 * i40evf_debug_d - OS dependent version of debug printing
 * @hw: pointer to the HW structure
 * @mask: debug level mask
 * @fmt_str: printf-type format description
 **/
void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
{
	char buf[512];
	va_list argptr;

	if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
		return;

	va_start(argptr, fmt_str);
	vsnprintf(buf, sizeof(buf), fmt_str, argptr);
	va_end(argptr);

	/* the debug string is already formatted with a newline */
	pr_info("%s", buf);
}

/**
 * i40evf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void i40evf_schedule_reset(struct i40evf_adapter *adapter)
{
	if (!(adapter->flags &
	      (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
		schedule_work(&adapter->reset_task);
	}
}

/**
 * i40evf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void i40evf_tx_timeout(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	i40evf_schedule_reset(adapter);
}

/**
 * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, I40E_VFINT_DYN_CTL01, 0);

	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * i40evf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;

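	/* Writing INTENA together with ITR_INDX set to 3 (the "no ITR"
	 * index on this hardware family) re-arms vector 0 without changing
	 * its interrupt throttle rate; ICR0_ENA1 then unmasks only the
	 * admin queue cause.
	 */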
	wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
				       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);

	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * i40evf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void i40evf_irq_disable(struct i40evf_adapter *adapter)
{
	int i;
	struct i40e_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * i40evf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
{
	struct i40e_hw *hw = &adapter->hw;
	int i;

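	/* MSI-X entry 0 is reserved for the admin queue, so queue vector i
	 * services the queues selected by bit (i - 1) of @mask.
	 */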
	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
			     I40E_VFINT_DYN_CTLN1_INTENA_MASK |
			     I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}

/**
 * i40evf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
{
	struct i40e_hw *hw = &adapter->hw;

	i40evf_misc_irq_enable(adapter);
	i40evf_irq_enable_queues(adapter, ~0);

	if (flush)
		rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * i40evf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t i40evf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40e_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, I40E_VFINT_ICR01);
	rd32(hw, I40E_VFINT_ICR0_ENA1);

	/* schedule work on the private workqueue */
	schedule_work(&adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

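	/* The actual ring cleanup happens in NAPI context via
	 * i40evf_napi_poll(); this handler only schedules the poll when the
	 * vector has rings attached.
	 */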
	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * i40evf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
{
	struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct i40e_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * i40evf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
{
	struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct i40e_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}

/**
 * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		i40evf_map_vector_to_rxq(adapter, vidx, ridx);
		i40evf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40evf_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts.  It's not called while the normal interrupt routine is executing.
 **/
static void i40evf_netpoll(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
		return;

	for (i = 0; i < q_vectors; i++)
		i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
}

#endif
/**
 * i40evf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
				       const cpumask_t *mask)
{
	struct i40e_q_vector *q_vector =
		container_of(notify, struct i40e_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * i40evf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void i40evf_irq_affinity_release(struct kref *ref) {}

/**
 * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	i40evf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
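
		/* Encode the basename, traffic type and queue index into the
		 * IRQ name so each vector is identifiable in
		 * /proc/interrupts.
		 */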
		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "i40evf-%s-TxRx-%d", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "i40evf-%s-rx-%d", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "i40evf-%s-tx-%d", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  i40evf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = i40evf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   i40evf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_set_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}

/**
 * i40evf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &i40evf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

/**
 * i40evf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

/**
 * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * i40evf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void i40evf_configure_tx(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i);
}

/**
 * i40evf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void i40evf_configure_rx(struct i40evf_adapter *adapter)
{
	unsigned int rx_buf_len = I40E_RXBUFFER_2048;
	struct i40e_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we might as well increase the size
		 * of our Rx buffer to make better use of the available space
		 */
		rx_buf_len = I40E_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu.  On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		if (adapter->flags & I40EVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}

/**
 * i40evf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
	struct i40evf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (vlan == f->vlan)
			return f;
	}
	return NULL;
}

/**
 * i40evf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
	struct i40evf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = i40evf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}

/**
 * i40evf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
	struct i40evf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = i40evf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
				  __always_unused __be16 proto, u16 vid)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_ALLOWED(adapter))
		return -EIO;
	if (i40evf_add_vlan(adapter, vid) == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
				   __always_unused __be16 proto, u16 vid)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	if (VLAN_ALLOWED(adapter)) {
		i40evf_del_vlan(adapter, vid);
		return 0;
	}
	return -EIO;
}

/**
 * i40evf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
				      const u8 *macaddr)
{
	struct i40evf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}

/**
 * i40evf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
				     const u8 *macaddr)
{
	struct i40evf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = i40evf_find_filter(adapter, macaddr);
	if (!f) {
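		/* Callers hold mac_vlan_list_lock (a BH spinlock), so the
		 * allocation must not sleep; hence GFP_ATOMIC.
		 */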
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}

/**
 * i40evf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_set_mac(struct net_device *netdev, void *p)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40e_hw *hw = &adapter->hw;
	struct i40evf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
		return -EPERM;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = i40evf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = i40evf_add_filter(adapter, addr->sa_data);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (f) {
		ether_addr_copy(hw->mac.addr, addr->sa_data);
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
	}

	return (f == NULL) ? -ENOMEM : 0;
}

/**
 * i40evf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int i40evf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	if (i40evf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * i40evf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40evf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = i40evf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	return 0;
}

/**
 * i40evf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40evf_set_rx_mode(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
	__dev_mc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
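
	/* .ndo_set_rx_mode runs in atomic context, so only record the
	 * desired promiscuous/allmulti state here; the watchdog task sends
	 * the actual virtchnl requests to the PF.
	 */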
	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & I40EVF_FLAG_PROMISC_ON)
		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
}

/**
 * i40evf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
{
	int q_idx;
	struct i40e_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}

/**
 * i40evf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
{
	int q_idx;
	struct i40e_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}

/**
 * i40evf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void i40evf_configure(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	i40evf_set_rx_mode(netdev);

	i40evf_configure_tx(adapter);
	i40evf_configure_rx(adapter);
	adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct i40e_ring *ring = &adapter->rx_rings[i];

		i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
	}
}

/**
 * i40evf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
 **/
static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
	adapter->state = __I40EVF_RUNNING;
	clear_bit(__I40E_VSI_DOWN, adapter->vsi.state);

	i40evf_napi_enable_all(adapter);

	adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}

/**
 * i40evf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
 **/
void i40evf_down(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct i40evf_vlan_filter *vlf;
	struct i40evf_mac_filter *f;
	struct i40evf_cloud_filter *cf;

	if (adapter->state <= __I40EVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	i40evf_napi_disable_all(adapter);
	i40evf_irq_disable(adapter);

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}

	/* remove all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->remove = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		cf->del = true;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
	    adapter->state != __I40EVF_RESETTING) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
	}

	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}

/**
 * i40evf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
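	/* pci_enable_msix_range() returns the number of vectors actually
	 * granted (between vector_threshold and vectors) or a negative errno.
	 */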
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}

/**
 * i40evf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void i40evf_free_queues(struct i40evf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	adapter->num_active_queues = 0;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}

/**
 * i40evf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet for
	 * certain the PF gave us the number of queues we asked for but we'll
	 * assume it did.  Once basic reset is finished we'll confirm once we
	 * start negotiating config with PF.
	 */
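	/* Pick the queue count in priority order: an explicit request
	 * (num_req_queues, e.g. from ethtool) first, then the ADq channel
	 * configuration, and otherwise one queue pair per online CPU,
	 * capped by what the PF granted.
	 */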
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct i40e_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct i40e_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct i40e_ring *tx_ring;
		struct i40e_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = I40E_ITR_TX_DEF;
		if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = I40E_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	return 0;

err_out:
	i40evf_free_queues(adapter);
	return -ENOMEM;
}

/**
 * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = i40evf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);

	return err;
}

/**
 * i40evf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
{
	struct i40e_aqc_get_set_rss_key_data *rss_key =
		(struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct i40e_hw *hw = &adapter->hw;
	int ret = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			i40evf_stat_str(hw, ret),
			i40evf_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				    adapter->rss_lut, adapter->rss_lut_size);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			i40evf_stat_str(hw, ret),
			i40evf_aq_str(hw, hw->aq.asq_last_status));
	}

	return ret;
}

/**
 * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

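	/* Without PF or admin queue assistance, program the VF's own HKEY
	 * (hash key) and HLUT (lookup table) registers directly.
	 */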
	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, I40E_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, I40E_VFQF_HLUT(i), dw[i]);

	i40e_flush(hw);

	return 0;
}

/**
 * i40evf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int i40evf_config_rss(struct i40evf_adapter *adapter)
{
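	/* Three strategies, in order of preference: let the PF program RSS
	 * via virtchnl (the flags are consumed by the watchdog task), use
	 * admin queue commands, or fall back to writing the VF registers
	 * directly.
	 */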
	if (RSS_PF(adapter)) {
		adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
					I40EVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return i40evf_config_rss_aq(adapter);
	} else {
		return i40evf_config_rss_reg(adapter);
	}
}

/**
 * i40evf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
{
	u16 i;

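	/* Spread hash buckets round-robin so every active queue receives an
	 * equal share of the lookup table.
	 */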
	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}

/**
 * i40evf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int i40evf_init_rss(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	int ret;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = I40E_DEFAULT_RSS_HENA;

		wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	i40evf_fill_rss_lut(adapter);

	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
	ret = i40evf_config_rss(adapter);

	return ret;
}

/**
 * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct i40e_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		q_vector->reg_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       i40evf_napi_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}

/**
 * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}

/**
 * i40evf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
{
	int err;

	err = i40evf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	rtnl_lock();
	err = i40evf_set_interrupt_capability(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = i40evf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* If we've made it so far while ADq flag being ON, then we haven't
	 * bailed out anywhere in middle. And ADq isn't just enabled but actual
	 * resources have been allocated in the reset path.
	 * Now we can truly claim that ADq is enabled.
	 */
	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc)
		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
			 adapter->num_tc);

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_q_vectors:
	i40evf_reset_interrupt_capability(adapter);
err_set_interrupt:
	i40evf_free_queues(adapter);
err_alloc_queues:
	return err;
}

/**
 * i40evf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void i40evf_free_rss(struct i40evf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}

/**
 * i40evf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev))
		i40evf_free_traffic_irqs(adapter);
	i40evf_free_misc_irq(adapter);
	i40evf_reset_interrupt_capability(adapter);
	i40evf_free_q_vectors(adapter);
	i40evf_free_queues(adapter);

	err = i40evf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = i40evf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__I40E_VSI_DOWN, adapter->vsi.state);

	i40evf_map_rings_to_vectors(adapter);

	if (RSS_AQ(adapter))
		adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
	else
		err = i40evf_init_rss(adapter);
err:
	return err;
}

/**
 * i40evf_watchdog_timer - Periodic call-back timer
 * @t: pointer to the timer_list embedded in our adapter structure
 **/
static void i40evf_watchdog_timer(struct timer_list *t)
{
	struct i40evf_adapter *adapter = from_timer(adapter, t,
						    watchdog_timer);

	schedule_work(&adapter->watchdog_task);
	/* timer will be rescheduled in watchdog task */
}

/**
 * i40evf_watchdog_task - Periodic call-back task
 * @work: pointer to work_struct
 **/
static void i40evf_watchdog_task(struct work_struct *work)
{
	struct i40evf_adapter *adapter = container_of(work,
						      struct i40evf_adapter,
						      watchdog_task);
	struct i40e_hw *hw = &adapter->hw;
	u32 reg_val;

	if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
		goto restart_watchdog;

	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
		    (reg_val == VIRTCHNL_VFR_COMPLETED)) {
			/* A chance for redemption! */
			dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
			adapter->state = __I40EVF_STARTUP;
			adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
			schedule_delayed_work(&adapter->init_task, 10);
			clear_bit(__I40EVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			/* Don't reschedule the watchdog, since we've restarted
			 * the init task. When init_task contacts the PF and
			 * gets everything set up again, it'll restart the
			 * watchdog for us. Down, boy. Sit. Stay. Woof.
			 */
			return;
		}
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	if ((adapter->state < __I40EVF_DOWN) ||
	    (adapter->flags & I40EVF_FLAG_RESET_PENDING))
		goto watchdog_done;

	/* check for reset */
	reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
		adapter->state = __I40EVF_RESETTING;
		adapter->flags |= I40EVF_FLAG_RESET_PENDING;
		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
		schedule_work(&adapter->reset_task);
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	/* Process admin queue tasks. After init, everything gets done
	 * here so we don't race on the admin queue.
	 */
	if (adapter->current_op) {
		if (!i40evf_asq_done(hw)) {
			dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
			i40evf_send_api_ver(adapter);
		}
		goto watchdog_done;
	}
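
	/* Exactly one admin queue request is issued per watchdog pass; while
	 * aq_required still has bits set, restart_watchdog rearms the timer
	 * on a short (20 ms) interval until every pending op has been sent.
	 */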
	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
		i40evf_send_vf_config_msg(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
		i40evf_disable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
		i40evf_map_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
		i40evf_add_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
		i40evf_add_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
		i40evf_del_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
		i40evf_del_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		i40evf_enable_vlan_stripping(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		i40evf_disable_vlan_stripping(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
		i40evf_configure_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
		i40evf_enable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		i40evf_init_rss(adapter);
		adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
		i40evf_get_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
		i40evf_set_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
		i40evf_set_rss_key(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
		i40evf_set_rss_lut(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
		i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				       FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
		i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
	    (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		i40evf_set_promiscuous(adapter, 0);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_CHANNELS) {
		i40evf_enable_channels(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_CHANNELS) {
		i40evf_disable_channels(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		i40evf_add_cloud_filter(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		i40evf_del_cloud_filter(adapter);
		goto watchdog_done;
	}

	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));

	if (adapter->state == __I40EVF_RUNNING)
		i40evf_request_stats(adapter);
watchdog_done:
	if (adapter->state == __I40EVF_RUNNING)
		i40evf_detect_recover_hung(&adapter->vsi);
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
	if (adapter->state == __I40EVF_REMOVE)
		return;
	if (adapter->aq_required)
		mod_timer(&adapter->watchdog_timer,
			  jiffies + msecs_to_jiffies(20));
	else
		mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
	schedule_work(&adapter->adminq_task);
}

static void i40evf_disable_vf(struct i40evf_adapter *adapter)
{
	struct i40evf_mac_filter *f, *ftmp;
	struct i40evf_vlan_filter *fv, *fvtmp;
	struct i40evf_cloud_filter *cf, *cftmp;

	adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;

	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	if (adapter->state == __I40EVF_RUNNING) {
		set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
		netif_carrier_off(adapter->netdev);
		netif_tx_disable(adapter->netdev);
		adapter->link_up = false;
		i40evf_napi_disable_all(adapter);
		i40evf_irq_disable(adapter);
		i40evf_free_traffic_irqs(adapter);
		i40evf_free_all_tx_resources(adapter);
		i40evf_free_all_rx_resources(adapter);
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Delete all of the filters */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
		list_del(&fv->list);
		kfree(fv);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	i40evf_free_misc_irq(adapter);
	i40evf_reset_interrupt_capability(adapter);
	i40evf_free_queues(adapter);
	i40evf_free_q_vectors(adapter);
	kfree(adapter->vf_res);
	i40evf_shutdown_adminq(&adapter->hw);
	adapter->netdev->flags &= ~IFF_UP;
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
	adapter->state = __I40EVF_DOWN;
	wake_up(&adapter->down_waitqueue);
	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}

#define I40EVF_RESET_WAIT_MS 10
#define I40EVF_RESET_WAIT_COUNT 500
/**
 * i40evf_reset_task - Call-back task to handle hardware reset
 * @work: pointer to work_struct
 *
 * During reset we need to shut down and reinitialize the admin queue
 * before we can use it to communicate with the PF again. We also clear
 * and reinit the rings because that context is lost as well.
 **/
static void i40evf_reset_task(struct work_struct *work)
{
	struct i40evf_adapter *adapter = container_of(work,
						      struct i40evf_adapter,
						      reset_task);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	struct net_device *netdev = adapter->netdev;
	struct i40e_hw *hw = &adapter->hw;
	struct i40evf_vlan_filter *vlf;
	struct i40evf_cloud_filter *cf;
	struct i40evf_mac_filter *f;
	u32 reg_val;
	int i = 0, err;
	bool running;

	/* When device is being removed it doesn't make sense to run the reset
	 * task, just return in such a case.
	 */
	if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section))
		return;

	while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);
	if (CLIENT_ENABLED(adapter)) {
		adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
				    I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
				    I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
				    I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
		cancel_delayed_work_sync(&adapter->client_task);
		i40evf_notify_client_close(&adapter->vsi, true);
	}
	i40evf_misc_irq_disable(adapter);
	if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
		adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
		/* Restart the AQ here. If we have been reset but didn't
		 * detect it, or if the PF had to reinit, our AQ will be hosed.
		 */
		i40evf_shutdown_adminq(hw);
		i40evf_init_adminq(hw);
		i40evf_request_reset(adapter);
	}
	adapter->flags |= I40EVF_FLAG_RESET_PENDING;

	/* poll until we see the reset actually happen */
	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
		reg_val = rd32(hw, I40E_VF_ARQLEN1) &
			  I40E_VF_ARQLEN1_ARQENABLE_MASK;
		if (!reg_val)
			break;
		usleep_range(5000, 10000);
	}
	if (i == I40EVF_RESET_WAIT_COUNT) {
		dev_info(&adapter->pdev->dev, "Never saw reset\n");
		goto continue_reset; /* act like the reset happened */
	}

	/* wait until the reset is complete and the PF is responding to us */
	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
		/* sleep first to make sure a minimum wait time is met */
		msleep(I40EVF_RESET_WAIT_MS);

		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
			break;
	}

	pci_set_master(adapter->pdev);

	if (i == I40EVF_RESET_WAIT_COUNT) {
		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
			reg_val);
		i40evf_disable_vf(adapter);
		clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
		return; /* Do not attempt to reinit. It's dead, Jim. */
	}

continue_reset:
	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	running = ((adapter->state == __I40EVF_RUNNING) ||
		   (adapter->state == __I40EVF_RESETTING));

	if (running) {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		adapter->link_up = false;
		i40evf_napi_disable_all(adapter);
	}
	i40evf_irq_disable(adapter);

	adapter->state = __I40EVF_RESETTING;
	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;

	/* free the Tx/Rx rings and descriptors, might be better to just
	 * re-use them sometime in the future
	 */
	i40evf_free_all_rx_resources(adapter);
	i40evf_free_all_tx_resources(adapter);

	adapter->flags |= I40EVF_FLAG_QUEUES_DISABLED;
	/* kill and reinit the admin queue */
	i40evf_shutdown_adminq(hw);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
	err = i40evf_init_adminq(hw);
	if (err)
		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
			 err);
	adapter->aq_required = 0;

	if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
		err = i40evf_reinit_interrupt_scheme(adapter);
		if (err)
			goto reset_err;
	}
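
	/* Renegotiate with the PF: these flags make the watchdog task ask
	 * for our resources and vector map again once the admin queue is
	 * serviceable.
	 */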
	adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG;
	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* re-add all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->add = true;
	}
	/* re-add all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->add = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* check if TCs are running and re-add all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc) {
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			cf->add = true;
		}
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
	i40evf_misc_irq_enable(adapter);

	mod_timer(&adapter->watchdog_timer, jiffies + 2);

	/* We were running when the reset started, so we need to restore some
	 * state here.
	 */
	if (running) {
		/* allocate transmit descriptors */
		err = i40evf_setup_all_tx_resources(adapter);
		if (err)
			goto reset_err;

		/* allocate receive descriptors */
		err = i40evf_setup_all_rx_resources(adapter);
		if (err)
			goto reset_err;

		if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
			err = i40evf_request_traffic_irqs(adapter,
							  netdev->name);
			if (err)
				goto reset_err;

			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
		}

		i40evf_configure(adapter);

		i40evf_up_complete(adapter);

		i40evf_irq_enable(adapter, true);
	} else {
		adapter->state = __I40EVF_DOWN;
		wake_up(&adapter->down_waitqueue);
	}
	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return;
reset_err:
	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
	i40evf_close(netdev);
}

/**
 * i40evf_adminq_task - worker thread to clean the admin queue
 * @work: pointer to work_struct containing our data
 **/
static void i40evf_adminq_task(struct work_struct *work)
{
	struct i40evf_adapter *adapter =
		container_of(work, struct i40evf_adapter, adminq_task);
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum virtchnl_ops v_op;
	i40e_status ret, v_ret;
	u32 val, oldval;
	u16 pending;

	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
		goto out;

	event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		goto out;
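
	/* Each ARQ descriptor carries the virtchnl opcode in cookie_high and
	 * the PF's return status in cookie_low; drain and dispatch events
	 * until the queue is empty.
	 */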
	do {
		ret = i40evf_clean_arq_element(hw, &event, &pending);
		v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low);

		if (ret || !v_op)
			break; /* No event to process or error cleaning ARQ */

		i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
					   event.msg_len);
		if (pending != 0)
			memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
	} while (pending);

	if ((adapter->flags &
	     (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) ||
	    adapter->state == __I40EVF_RESETTING)
		goto freedom;

	/* check for error indications */
	val = rd32(hw, hw->aq.arq.len);
	if (val == 0xdeadbeef) /* indicates device in reset */
		goto freedom;
	oldval = val;
	if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
	}
	if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
	}
	if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.arq.len, val);

	val = rd32(hw, hw->aq.asq.len);
	oldval = val;
	if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
	}
	if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
	}
	if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.asq.len, val);

freedom:
	kfree(event.msg_buf);
out:
	/* re-enable Admin queue interrupt cause */
	i40evf_misc_irq_enable(adapter);
}
2127 * i40evf_client_task - worker thread to perform client work
2128 * @work: pointer to work_struct containing our data
2130 * This task handles client interactions. Because client calls can be
2131 * reentrant, we can't handle them in the watchdog.
2133 static void i40evf_client_task(struct work_struct *work)
2135 struct i40evf_adapter *adapter =
2136 container_of(work, struct i40evf_adapter, client_task.work);
2138 /* If we can't get the client bit, just give up. We'll be rescheduled later. */
2142 if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
2145 if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2146 i40evf_client_subtask(adapter);
2147 adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
2150 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2151 i40evf_notify_client_l2_params(&adapter->vsi);
2152 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2155 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
2156 i40evf_notify_client_close(&adapter->vsi, false);
2157 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
2160 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
2161 i40evf_notify_client_open(&adapter->vsi);
2162 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
2165 clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
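/* A minimal sketch of the producer side: other paths hand work to this
 * task by setting a flag and letting it be picked up on the next run, as
 * the MTU-change path later in this file does in effect:
 *
 *	i40evf_notify_client_l2_params(&adapter->vsi);
 *	adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
 */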
2169 * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
2170 * @adapter: board private structure
2172 * Free all transmit software resources
2174 void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
2178 if (!adapter->tx_rings)
2181 for (i = 0; i < adapter->num_active_queues; i++)
2182 if (adapter->tx_rings[i].desc)
2183 i40evf_free_tx_resources(&adapter->tx_rings[i]);
2187 * i40evf_setup_all_tx_resources - allocate all queues Tx resources
2188 * @adapter: board private structure
2190 * If this function returns with an error, then it's possible one or
2191 * more of the rings are populated (while the rest are not). It is the
2192 * caller's duty to clean those orphaned rings.
2194 * Return 0 on success, negative on failure
2196 static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
2200 for (i = 0; i < adapter->num_active_queues; i++) {
2201 adapter->tx_rings[i].count = adapter->tx_desc_count;
2202 err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]);
2205 dev_err(&adapter->pdev->dev,
2206 "Allocation for Tx Queue %u failed\n", i);
2214 * i40evf_setup_all_rx_resources - allocate all queues Rx resources
2215 * @adapter: board private structure
2217 * If this function returns with an error, then it's possible one or
2218 * more of the rings are populated (while the rest are not). It is the
2219 * caller's duty to clean those orphaned rings.
2221 * Return 0 on success, negative on failure
2223 static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
2227 for (i = 0; i < adapter->num_active_queues; i++) {
2228 adapter->rx_rings[i].count = adapter->rx_desc_count;
2229 err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
2232 dev_err(&adapter->pdev->dev,
2233 "Allocation for Rx Queue %u failed\n", i);
2240 * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
2241 * @adapter: board private structure
2243 * Free all receive software resources
2245 void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
2249 if (!adapter->rx_rings)
2252 for (i = 0; i < adapter->num_active_queues; i++)
2253 if (adapter->rx_rings[i].desc)
2254 i40evf_free_rx_resources(&adapter->rx_rings[i]);
2258 * i40evf_validate_tx_bandwidth - validate the max Tx bandwidth
2259 * @adapter: board private structure
2260 * @max_tx_rate: max Tx bw for a tc
2262 static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter,
2265 int speed = 0, ret = 0;
2267 switch (adapter->link_speed) {
2268 case I40E_LINK_SPEED_40GB:
2271 case I40E_LINK_SPEED_25GB:
2274 case I40E_LINK_SPEED_20GB:
2277 case I40E_LINK_SPEED_10GB:
2280 case I40E_LINK_SPEED_1GB:
2283 case I40E_LINK_SPEED_100MB:
2290 if (max_tx_rate > speed) {
2291 dev_err(&adapter->pdev->dev,
2292 "Invalid tx rate specified\n");
2300 * i40evf_validate_channel_config - validate queue mapping info
2301 * @adapter: board private structure
2302 * @mqprio_qopt: queue parameters
2304 * This function validates the queue channel config provided by the
2305 * user. Returns 0 on a valid config, negative otherwise.
2308 static int i40evf_validate_ch_config(struct i40evf_adapter *adapter,
2309 struct tc_mqprio_qopt_offload *mqprio_qopt)
2311 u64 total_max_rate = 0;
2316 if (mqprio_qopt->qopt.num_tc > I40EVF_MAX_TRAFFIC_CLASS ||
2317 mqprio_qopt->qopt.num_tc < 1)
2320 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
2321 if (!mqprio_qopt->qopt.count[i] ||
2322 mqprio_qopt->qopt.offset[i] != num_qps)
2324 if (mqprio_qopt->min_rate[i]) {
2325 dev_err(&adapter->pdev->dev,
2326 "Invalid min tx rate (greater than 0) specified\n");
2329 /* convert to Mbps */
2330 tx_rate = div_u64(mqprio_qopt->max_rate[i],
2331 I40EVF_MBPS_DIVISOR);
2332 total_max_rate += tx_rate;
2333 num_qps += mqprio_qopt->qopt.count[i];
2335 if (num_qps > I40EVF_MAX_REQ_QUEUES)
2338 ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate);
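/* Worked example of a layout this validator accepts, with num_tc = 2:
 *
 *	qopt.count  = { 4, 4 }
 *	qopt.offset = { 0, 4 }	(each offset must equal the running
 *				 queue count, so TCs are contiguous)
 *
 * max_rate[] arrives in bytes per second; assuming I40EVF_MBPS_DIVISOR is
 * 125000, a max_rate of 125000000 contributes 1000 Mbit/s to
 * total_max_rate. num_qps ends up as 8, which must not exceed
 * I40EVF_MAX_REQ_QUEUES.
 */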
2343 * i40evf_del_all_cloud_filters - delete all cloud filters
2344 * on the traffic classes
2346 static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter)
2348 struct i40evf_cloud_filter *cf, *cftmp;
2350 spin_lock_bh(&adapter->cloud_filter_list_lock);
2351 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
2353 list_del(&cf->list);
2355 adapter->num_cloud_filters--;
2357 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2361 * __i40evf_setup_tc - configure multiple traffic classes
2362 * @netdev: network interface device structure
2363 * @type_data: tc offload data
2365 * This function processes the config information provided by the
2366 * user to configure traffic classes/queue channels and packages the
2367 * information to request the PF to set up traffic classes.
2369 * Returns 0 on success.
2371 static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
2373 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
2374 struct i40evf_adapter *adapter = netdev_priv(netdev);
2375 struct virtchnl_vf_resource *vfres = adapter->vf_res;
2376 u8 num_tc = 0, total_qps = 0;
2377 int ret = 0, netdev_tc = 0;
2382 num_tc = mqprio_qopt->qopt.num_tc;
2383 mode = mqprio_qopt->mode;
2385 /* delete queue_channel */
2386 if (!mqprio_qopt->qopt.hw) {
2387 if (adapter->ch_config.state == __I40EVF_TC_RUNNING) {
2388 /* reset the tc configuration */
2389 netdev_reset_tc(netdev);
2390 adapter->num_tc = 0;
2391 netif_tx_stop_all_queues(netdev);
2392 netif_tx_disable(netdev);
2393 i40evf_del_all_cloud_filters(adapter);
2394 adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS;
2401 /* add queue channel */
2402 if (mode == TC_MQPRIO_MODE_CHANNEL) {
2403 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
2404 dev_err(&adapter->pdev->dev, "ADq not supported\n");
2407 if (adapter->ch_config.state != __I40EVF_TC_INVALID) {
2408 dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
2412 ret = i40evf_validate_ch_config(adapter, mqprio_qopt);
2415 /* Return if same TC config is requested */
2416 if (adapter->num_tc == num_tc)
2418 adapter->num_tc = num_tc;
2420 for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
2422 adapter->ch_config.ch_info[i].count =
2423 mqprio_qopt->qopt.count[i];
2424 adapter->ch_config.ch_info[i].offset =
2425 mqprio_qopt->qopt.offset[i];
2426 total_qps += mqprio_qopt->qopt.count[i];
2427 max_tx_rate = mqprio_qopt->max_rate[i];
2428 /* convert to Mbps */
2429 max_tx_rate = div_u64(max_tx_rate,
2430 I40EVF_MBPS_DIVISOR);
2431 adapter->ch_config.ch_info[i].max_tx_rate =
2434 adapter->ch_config.ch_info[i].count = 1;
2435 adapter->ch_config.ch_info[i].offset = 0;
2438 adapter->ch_config.total_qps = total_qps;
2439 netif_tx_stop_all_queues(netdev);
2440 netif_tx_disable(netdev);
2441 adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_CHANNELS;
2442 netdev_reset_tc(netdev);
2443 /* Report the tc mapping up the stack */
2444 netdev_set_num_tc(adapter->netdev, num_tc);
2445 for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
2446 u16 qcount = mqprio_qopt->qopt.count[i];
2447 u16 qoffset = mqprio_qopt->qopt.offset[i];
2450 netdev_set_tc_queue(netdev, netdev_tc++, qcount,
2459 * i40evf_parse_cls_flower - Parse tc flower filters provided by kernel
2460 * @adapter: board private structure
2461 * @f: pointer to struct tc_cls_flower_offload
2462 * @filter: pointer to cloud filter structure
2464 static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
2465 struct tc_cls_flower_offload *f,
2466 struct i40evf_cloud_filter *filter)
2468 u16 n_proto_mask = 0;
2469 u16 n_proto_key = 0;
2474 struct virtchnl_filter *vf = &filter->f;
2476 if (f->dissector->used_keys &
2477 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2478 BIT(FLOW_DISSECTOR_KEY_BASIC) |
2479 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2480 BIT(FLOW_DISSECTOR_KEY_VLAN) |
2481 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2482 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2483 BIT(FLOW_DISSECTOR_KEY_PORTS) |
2484 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
2485 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
2486 f->dissector->used_keys);
2490 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
2491 struct flow_dissector_key_keyid *mask =
2492 skb_flow_dissector_target(f->dissector,
2493 FLOW_DISSECTOR_KEY_ENC_KEYID,
2496 if (mask->keyid != 0)
2497 field_flags |= I40EVF_CLOUD_FIELD_TEN_ID;
2500 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
2501 struct flow_dissector_key_basic *key =
2502 skb_flow_dissector_target(f->dissector,
2503 FLOW_DISSECTOR_KEY_BASIC,
2506 struct flow_dissector_key_basic *mask =
2507 skb_flow_dissector_target(f->dissector,
2508 FLOW_DISSECTOR_KEY_BASIC,
2510 n_proto_key = ntohs(key->n_proto);
2511 n_proto_mask = ntohs(mask->n_proto);
2513 if (n_proto_key == ETH_P_ALL) {
2517 n_proto = n_proto_key & n_proto_mask;
2518 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
2520 if (n_proto == ETH_P_IPV6) {
2521 /* specify flow type as TCP IPv6 */
2522 vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
2525 if (key->ip_proto != IPPROTO_TCP) {
2526 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
2531 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2532 struct flow_dissector_key_eth_addrs *key =
2533 skb_flow_dissector_target(f->dissector,
2534 FLOW_DISSECTOR_KEY_ETH_ADDRS,
2537 struct flow_dissector_key_eth_addrs *mask =
2538 skb_flow_dissector_target(f->dissector,
2539 FLOW_DISSECTOR_KEY_ETH_ADDRS,
2541 /* use is_broadcast and is_zero to check for all 0xff or all 0 */
2542 if (!is_zero_ether_addr(mask->dst)) {
2543 if (is_broadcast_ether_addr(mask->dst)) {
2544 field_flags |= I40EVF_CLOUD_FIELD_OMAC;
2546 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
2548 return I40E_ERR_CONFIG;
2552 if (!is_zero_ether_addr(mask->src)) {
2553 if (is_broadcast_ether_addr(mask->src)) {
2554 field_flags |= I40EVF_CLOUD_FIELD_IMAC;
2556 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
2558 return I40E_ERR_CONFIG;
2562 if (!is_zero_ether_addr(key->dst))
2563 if (is_valid_ether_addr(key->dst) ||
2564 is_multicast_ether_addr(key->dst)) {
2565 /* set the mask if a valid dst_mac address */
2566 for (i = 0; i < ETH_ALEN; i++)
2567 vf->mask.tcp_spec.dst_mac[i] |= 0xff;
2568 ether_addr_copy(vf->data.tcp_spec.dst_mac,
2572 if (!is_zero_ether_addr(key->src))
2573 if (is_valid_ether_addr(key->src) ||
2574 is_multicast_ether_addr(key->src)) {
2575 /* set the mask if a valid src_mac address */
2576 for (i = 0; i < ETH_ALEN; i++)
2577 vf->mask.tcp_spec.src_mac[i] |= 0xff;
2578 ether_addr_copy(vf->data.tcp_spec.src_mac,
2583 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
2584 struct flow_dissector_key_vlan *key =
2585 skb_flow_dissector_target(f->dissector,
2586 FLOW_DISSECTOR_KEY_VLAN,
2588 struct flow_dissector_key_vlan *mask =
2589 skb_flow_dissector_target(f->dissector,
2590 FLOW_DISSECTOR_KEY_VLAN,
2593 if (mask->vlan_id) {
2594 if (mask->vlan_id == VLAN_VID_MASK) {
2595 field_flags |= I40EVF_CLOUD_FIELD_IVLAN;
2597 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
2599 return I40E_ERR_CONFIG;
2602 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
2603 vf->data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id);
2606 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
2607 struct flow_dissector_key_control *key =
2608 skb_flow_dissector_target(f->dissector,
2609 FLOW_DISSECTOR_KEY_CONTROL,
2612 addr_type = key->addr_type;
2615 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2616 struct flow_dissector_key_ipv4_addrs *key =
2617 skb_flow_dissector_target(f->dissector,
2618 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
2620 struct flow_dissector_key_ipv4_addrs *mask =
2621 skb_flow_dissector_target(f->dissector,
2622 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
2626 if (mask->dst == cpu_to_be32(0xffffffff)) {
2627 field_flags |= I40EVF_CLOUD_FIELD_IIP;
2629 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
2630 be32_to_cpu(mask->dst));
2631 return I40E_ERR_CONFIG;
2636 if (mask->src == cpu_to_be32(0xffffffff)) {
2637 field_flags |= I40EVF_CLOUD_FIELD_IIP;
2639 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
2640 be32_to_cpu(mask->src));
2641 return I40E_ERR_CONFIG;
2645 if (field_flags & I40EVF_CLOUD_FIELD_TEN_ID) {
2646 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
2647 return I40E_ERR_CONFIG;
2650 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
2651 vf->data.tcp_spec.dst_ip[0] = key->dst;
2654 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
2655 vf->data.tcp_spec.src_ip[0] = key->src;
2659 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2660 struct flow_dissector_key_ipv6_addrs *key =
2661 skb_flow_dissector_target(f->dissector,
2662 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
2664 struct flow_dissector_key_ipv6_addrs *mask =
2665 skb_flow_dissector_target(f->dissector,
2666 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
2669 /* validate mask, make sure it is not IPV6_ADDR_ANY */
2670 if (ipv6_addr_any(&mask->dst)) {
2671 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
2673 return I40E_ERR_CONFIG;
2676 /* src and dest IPv6 addresses should not be LOOPBACK
2677 * (0:0:0:0:0:0:0:1) which can be represented as ::1
2679 if (ipv6_addr_loopback(&key->dst) ||
2680 ipv6_addr_loopback(&key->src)) {
2681 dev_err(&adapter->pdev->dev,
2682 "ipv6 addr should not be loopback\n");
2683 return I40E_ERR_CONFIG;
2685 if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
2686 field_flags |= I40EVF_CLOUD_FIELD_IIP;
2688 for (i = 0; i < 4; i++)
2689 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
2690 memcpy(&vf->data.tcp_spec.dst_ip, &key->dst.s6_addr32,
2691 sizeof(vf->data.tcp_spec.dst_ip));
2692 for (i = 0; i < 4; i++)
2693 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
2694 memcpy(&vf->data.tcp_spec.src_ip, &key->src.s6_addr32,
2695 sizeof(vf->data.tcp_spec.src_ip));
2697 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
2698 struct flow_dissector_key_ports *key =
2699 skb_flow_dissector_target(f->dissector,
2700 FLOW_DISSECTOR_KEY_PORTS,
2702 struct flow_dissector_key_ports *mask =
2703 skb_flow_dissector_target(f->dissector,
2704 FLOW_DISSECTOR_KEY_PORTS,
2708 if (mask->src == cpu_to_be16(0xffff)) {
2709 field_flags |= I40EVF_CLOUD_FIELD_IIP;
2711 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
2712 be16_to_cpu(mask->src));
2713 return I40E_ERR_CONFIG;
2718 if (mask->dst == cpu_to_be16(0xffff)) {
2719 field_flags |= I40EVF_CLOUD_FIELD_IIP;
2721 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
2722 be16_to_cpu(mask->dst));
2723 return I40E_ERR_CONFIG;
2727 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
2728 vf->data.tcp_spec.dst_port = key->dst;
2732 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
2733 vf->data.tcp_spec.src_port = key->src;
2736 vf->field_flags = field_flags;
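/* An illustrative tc(8) invocation this parser accepts (iproute2 syntax,
 * given here as an assumption rather than tested output): exact-match
 * fields on a TCP flow, redirected to an ADq traffic class:
 *
 *	tc filter add dev <vf-netdev> protocol ip ingress \
 *		flower dst_ip 192.168.1.10 ip_proto tcp dst_port 80 \
 *		skip_sw hw_tc 1
 *
 * Partial masks, non-TCP ip_proto, and tenant IDs combined with IP
 * matches are all rejected above with I40E_ERR_CONFIG.
 */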
2742 * i40evf_handle_tclass - Forward to a traffic class on the device
2743 * @adapter: board private structure
2744 * @tc: traffic class index on the device
2745 * @filter: pointer to cloud filter structure
2747 static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc,
2748 struct i40evf_cloud_filter *filter)
2752 if (tc < adapter->num_tc) {
2753 if (!filter->f.data.tcp_spec.dst_port) {
2754 dev_err(&adapter->pdev->dev,
2755 "Specify destination port to redirect to traffic class other than TC0\n");
2759 /* redirect to a traffic class on the same device */
2760 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
2761 filter->f.action_meta = tc;
2766 * i40evf_configure_clsflower - Add tc flower filters
2767 * @adapter: board private structure
2768 * @cls_flower: Pointer to struct tc_cls_flower_offload
2770 static int i40evf_configure_clsflower(struct i40evf_adapter *adapter,
2771 struct tc_cls_flower_offload *cls_flower)
2773 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2774 struct i40evf_cloud_filter *filter = NULL;
2775 int err = -EINVAL, count = 50;
2778 dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
2782 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2786 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
2787 &adapter->crit_section)) {
2793 filter->cookie = cls_flower->cookie;
2795 /* set the mask to all zeroes to begin with */
2796 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
2797 /* start with flow type and eth type IPv4 */
2798 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
2799 err = i40evf_parse_cls_flower(adapter, cls_flower, filter);
2803 err = i40evf_handle_tclass(adapter, tc, filter);
2807 /* add filter to the list */
2808 spin_lock_bh(&adapter->cloud_filter_list_lock);
2809 list_add_tail(&filter->list, &adapter->cloud_filter_list);
2810 adapter->num_cloud_filters++;
2812 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
2813 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2818 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
2822 /* i40evf_find_cf - Find the cloud filter in the list
2823 * @adapter: Board private structure
2824 * @cookie: filter specific cookie
2826 * Returns ptr to the filter object or NULL. Must be called while holding the
2827 * cloud_filter_list_lock.
2829 static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter,
2830 unsigned long *cookie)
2832 struct i40evf_cloud_filter *filter = NULL;
2837 list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
2838 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
2845 * i40evf_delete_clsflower - Remove tc flower filters
2846 * @adapter: board private structure
2847 * @cls_flower: Pointer to struct tc_cls_flower_offload
2849 static int i40evf_delete_clsflower(struct i40evf_adapter *adapter,
2850 struct tc_cls_flower_offload *cls_flower)
2852 struct i40evf_cloud_filter *filter = NULL;
2855 spin_lock_bh(&adapter->cloud_filter_list_lock);
2856 filter = i40evf_find_cf(adapter, &cls_flower->cookie);
2859 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
2863 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2869 * i40evf_setup_tc_cls_flower - flower classifier offloads
2870 * @adapter: board private structure
2871 * @cls_flower: pointer to struct tc_cls_flower_offload
2873 static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter,
2874 struct tc_cls_flower_offload *cls_flower)
2876 if (cls_flower->common.chain_index)
2879 switch (cls_flower->command) {
2880 case TC_CLSFLOWER_REPLACE:
2881 return i40evf_configure_clsflower(adapter, cls_flower);
2882 case TC_CLSFLOWER_DESTROY:
2883 return i40evf_delete_clsflower(adapter, cls_flower);
2884 case TC_CLSFLOWER_STATS:
2892 * i40evf_setup_tc_block_cb - block callback for tc
2893 * @type: type of offload
2894 * @type_data: offload data
2897 * This function is the block callback for traffic classes
2899 static int i40evf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2903 case TC_SETUP_CLSFLOWER:
2904 return i40evf_setup_tc_cls_flower(cb_priv, type_data);
2911 * i40evf_setup_tc_block - register callbacks for tc
2912 * @dev: network interface device structure
2913 * @f: tc offload data
2915 * This function registers block callbacks for tc
2918 static int i40evf_setup_tc_block(struct net_device *dev,
2919 struct tc_block_offload *f)
2921 struct i40evf_adapter *adapter = netdev_priv(dev);
2923 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
2926 switch (f->command) {
2928 return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb,
2929 adapter, adapter, f->extack);
2930 case TC_BLOCK_UNBIND:
2931 tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb,
2940 * i40evf_setup_tc - configure multiple traffic classes
2941 * @netdev: network interface device structure
2942 * @type: type of offload
2943 * @type_data: tc offload data
2945 * This function is the callback to ndo_setup_tc in the netdev_ops structure.
2948 * Returns 0 on success
2950 static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
2954 case TC_SETUP_QDISC_MQPRIO:
2955 return __i40evf_setup_tc(netdev, type_data);
2956 case TC_SETUP_BLOCK:
2957 return i40evf_setup_tc_block(netdev, type_data);
2964 * i40evf_open - Called when a network interface is made active
2965 * @netdev: network interface device structure
2967 * Returns 0 on success, negative value on failure
2969 * The open entry point is called when a network interface is made
2970 * active by the system (IFF_UP). At this point all resources needed
2971 * for transmit and receive operations are allocated, the interrupt
2972 * handler is registered with the OS, the watchdog timer is started,
2973 * and the stack is notified that the interface is ready.
2975 static int i40evf_open(struct net_device *netdev)
2977 struct i40evf_adapter *adapter = netdev_priv(netdev);
2980 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
2981 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
2985 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
2986 &adapter->crit_section))
2987 usleep_range(500, 1000);
2989 if (adapter->state != __I40EVF_DOWN) {
2994 /* allocate transmit descriptors */
2995 err = i40evf_setup_all_tx_resources(adapter);
2999 /* allocate receive descriptors */
3000 err = i40evf_setup_all_rx_resources(adapter);
3004 /* clear any pending interrupts, may auto mask */
3005 err = i40evf_request_traffic_irqs(adapter, netdev->name);
3009 spin_lock_bh(&adapter->mac_vlan_list_lock);
3011 i40evf_add_filter(adapter, adapter->hw.mac.addr);
3013 spin_unlock_bh(&adapter->mac_vlan_list_lock);
3015 i40evf_configure(adapter);
3017 i40evf_up_complete(adapter);
3019 i40evf_irq_enable(adapter, true);
3021 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
3026 i40evf_down(adapter);
3027 i40evf_free_traffic_irqs(adapter);
3029 i40evf_free_all_rx_resources(adapter);
3031 i40evf_free_all_tx_resources(adapter);
3033 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
3039 * i40evf_close - Disables a network interface
3040 * @netdev: network interface device structure
3042 * Returns 0, this is not allowed to fail
3044 * The close entry point is called when an interface is de-activated
3045 * by the OS. The hardware is still under the driver's control, but
3046 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
3047 * are freed, along with all transmit and receive resources.
3049 static int i40evf_close(struct net_device *netdev)
3051 struct i40evf_adapter *adapter = netdev_priv(netdev);
3054 if (adapter->state <= __I40EVF_DOWN_PENDING)
3057 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
3058 &adapter->crit_section))
3059 usleep_range(500, 1000);
3061 set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
3062 if (CLIENT_ENABLED(adapter))
3063 adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
3065 i40evf_down(adapter);
3066 adapter->state = __I40EVF_DOWN_PENDING;
3067 i40evf_free_traffic_irqs(adapter);
3069 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
3071 /* We explicitly don't free resources here because the hardware is
3072 * still active and can DMA into memory. Resources are cleared in
3073 * i40evf_virtchnl_completion() after we get confirmation from the PF
3074 * driver that the rings have been stopped.
3076 * Also, we wait for state to transition to __I40EVF_DOWN before
3077 * returning. State change occurs in i40evf_virtchnl_completion() after
3078 * VF resources are released (which occurs after PF driver processes and
3079 * responds to admin queue commands).
3082 status = wait_event_timeout(adapter->down_waitqueue,
3083 adapter->state == __I40EVF_DOWN,
3084 msecs_to_jiffies(200));
3086 netdev_warn(netdev, "Device resources not yet released\n");
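/* The waker side of this handshake runs in i40evf_virtchnl_completion()
 * (and in the init/reinit paths shown earlier), which in effect does:
 *
 *	adapter->state = __I40EVF_DOWN;
 *	wake_up(&adapter->down_waitqueue);
 */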
3091 * i40evf_change_mtu - Change the Maximum Transfer Unit
3092 * @netdev: network interface device structure
3093 * @new_mtu: new value for maximum frame size
3095 * Returns 0 on success, negative on failure
3097 static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
3099 struct i40evf_adapter *adapter = netdev_priv(netdev);
3101 netdev->mtu = new_mtu;
3102 if (CLIENT_ENABLED(adapter)) {
3103 i40evf_notify_client_l2_params(&adapter->vsi);
3104 adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
3106 adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
3107 schedule_work(&adapter->reset_task);
3113 * i40evf_set_features - set the netdev feature flags
3114 * @netdev: ptr to the netdev being adjusted
3115 * @features: the feature set that the stack is suggesting
3116 * Note: expects to be called while under rtnl_lock()
3118 static int i40evf_set_features(struct net_device *netdev,
3119 netdev_features_t features)
3121 struct i40evf_adapter *adapter = netdev_priv(netdev);
3123 /* Don't allow changing VLAN_RX flag when adapter is not capable of VLAN offload */
3126 if (!VLAN_ALLOWED(adapter)) {
3127 if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
3129 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
3130 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3131 adapter->aq_required |=
3132 I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
3134 adapter->aq_required |=
3135 I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
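/* Usage sketch: a userspace toggle such as
 *
 *	ethtool -K <vf-netdev> rxvlan off
 *
 * reaches this handler via ndo_set_features; on a VLAN-capable VF it
 * queues I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING so the pending admin
 * queue work sends the request to the PF.
 */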
3142 * i40evf_features_check - Validate encapsulated packet conforms to limits
3143 * @skb: buffer being checked
3144 * @dev: This physical port's netdev
3145 * @features: Offload features that the stack believes apply
3147 static netdev_features_t i40evf_features_check(struct sk_buff *skb,
3148 struct net_device *dev,
3149 netdev_features_t features)
3153 /* No point in doing any of this if neither checksum nor GSO are
3154 * being requested for this frame. We can rule out both by just
3155 * checking for CHECKSUM_PARTIAL
3157 if (skb->ip_summed != CHECKSUM_PARTIAL)
3160 /* We cannot support GSO if the MSS is going to be less than
3161 * 64 bytes. If it is then we need to drop support for GSO.
3163 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
3164 features &= ~NETIF_F_GSO_MASK;
3166 /* MACLEN can support at most 63 words */
3167 len = skb_network_header(skb) - skb->data;
3168 if (len & ~(63 * 2))
3171 /* IPLEN and EIPLEN can support at most 127 dwords */
3172 len = skb_transport_header(skb) - skb_network_header(skb);
3173 if (len & ~(127 * 4))
3176 if (skb->encapsulation) {
3177 /* L4TUNLEN can support 127 words */
3178 len = skb_inner_network_header(skb) - skb_transport_header(skb);
3179 if (len & ~(127 * 2))
3182 /* IPLEN can support at most 127 dwords */
3183 len = skb_inner_transport_header(skb) -
3184 skb_inner_network_header(skb);
3185 if (len & ~(127 * 4))
3189 /* No need to validate L4LEN as TCP is the only protocol with a
3190 * flexible value, and we support all possible values supported
3191 * by TCP, which is at most 15 dwords
3196 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
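/* Worked example for the MACLEN check above: `len & ~(63 * 2)` is
 * nonzero when len exceeds 126 bytes or is odd, because the hardware
 * counts MACLEN in 2-byte words. A standard 14-byte Ethernet header
 * passes (14 & ~126 == 0), while a 15-byte or 128-byte L2 header falls
 * through to the final return and loses checksum/GSO offloads.
 */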
3200 * i40evf_fix_features - fix up the netdev feature bits
3201 * @netdev: our net device
3202 * @features: desired feature bits
3204 * Returns fixed-up features bits
3206 static netdev_features_t i40evf_fix_features(struct net_device *netdev,
3207 netdev_features_t features)
3209 struct i40evf_adapter *adapter = netdev_priv(netdev);
3211 if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
3212 features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3213 NETIF_F_HW_VLAN_CTAG_RX |
3214 NETIF_F_HW_VLAN_CTAG_FILTER);
3219 static const struct net_device_ops i40evf_netdev_ops = {
3220 .ndo_open = i40evf_open,
3221 .ndo_stop = i40evf_close,
3222 .ndo_start_xmit = i40evf_xmit_frame,
3223 .ndo_set_rx_mode = i40evf_set_rx_mode,
3224 .ndo_validate_addr = eth_validate_addr,
3225 .ndo_set_mac_address = i40evf_set_mac,
3226 .ndo_change_mtu = i40evf_change_mtu,
3227 .ndo_tx_timeout = i40evf_tx_timeout,
3228 .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
3229 .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
3230 .ndo_features_check = i40evf_features_check,
3231 .ndo_fix_features = i40evf_fix_features,
3232 .ndo_set_features = i40evf_set_features,
3233 #ifdef CONFIG_NET_POLL_CONTROLLER
3234 .ndo_poll_controller = i40evf_netpoll,
3236 .ndo_setup_tc = i40evf_setup_tc,
3240 * i40evf_check_reset_complete - check that VF reset is complete
3241 * @hw: pointer to hw struct
3243 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
3245 static int i40evf_check_reset_complete(struct i40e_hw *hw)
3250 for (i = 0; i < 100; i++) {
3251 rstat = rd32(hw, I40E_VFGEN_RSTAT) &
3252 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
3253 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
3254 (rstat == VIRTCHNL_VFR_COMPLETED))
3256 usleep_range(10, 20);
3262 * i40evf_process_config - Process the config information we got from the PF
3263 * @adapter: board private structure
3265 * Verify that we have a valid config struct, and set up our netdev features
3266 * and our VSI struct.
3268 int i40evf_process_config(struct i40evf_adapter *adapter)
3270 struct virtchnl_vf_resource *vfres = adapter->vf_res;
3271 int i, num_req_queues = adapter->num_req_queues;
3272 struct net_device *netdev = adapter->netdev;
3273 struct i40e_vsi *vsi = &adapter->vsi;
3274 netdev_features_t hw_enc_features;
3275 netdev_features_t hw_features;
3277 /* got VF config message back from PF, now we can parse it */
3278 for (i = 0; i < vfres->num_vsis; i++) {
3279 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
3280 adapter->vsi_res = &vfres->vsi_res[i];
3282 if (!adapter->vsi_res) {
3283 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
3287 if (num_req_queues &&
3288 num_req_queues != adapter->vsi_res->num_queue_pairs) {
3289 /* Problem. The PF gave us a different number of queues than we
3290 * requested. We need a reset to see if we can get back to a
3291 * working state.
3293 dev_err(&adapter->pdev->dev,
3294 "Requested %d queues, but PF only gave us %d.\n",
3296 adapter->vsi_res->num_queue_pairs);
3297 adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
3298 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
3299 i40evf_schedule_reset(adapter);
3302 adapter->num_req_queues = 0;
3304 hw_enc_features = NETIF_F_SG |
3308 NETIF_F_SOFT_FEATURES |
3317 /* advertise to the stack only if offloads for encapsulated packets are supported */
3320 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
3321 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
3323 NETIF_F_GSO_GRE_CSUM |
3324 NETIF_F_GSO_IPXIP4 |
3325 NETIF_F_GSO_IPXIP6 |
3326 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3327 NETIF_F_GSO_PARTIAL |
3330 if (!(vfres->vf_cap_flags &
3331 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
3332 netdev->gso_partial_features |=
3333 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3335 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
3336 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
3337 netdev->hw_enc_features |= hw_enc_features;
3339 /* record features VLANs can make use of */
3340 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
3342 /* Write features and hw_features separately to avoid polluting
3343 * either with, or dropping, features that were set when we registered.
3345 hw_features = hw_enc_features;
3347 /* Enable VLAN features if supported */
3348 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3349 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
3350 NETIF_F_HW_VLAN_CTAG_RX);
3351 /* Enable cloud filter if ADQ is supported */
3352 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
3353 hw_features |= NETIF_F_HW_TC;
3355 netdev->hw_features |= hw_features;
3357 netdev->features |= hw_features;
3359 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3360 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3362 netdev->priv_flags |= IFF_UNICAST_FLT;
3364 /* Do not turn on offloads when they are requested to be turned off.
3365 * TSO needs a minimum of 576 bytes to work correctly.
3367 if (netdev->wanted_features) {
3368 if (!(netdev->wanted_features & NETIF_F_TSO) ||
3370 netdev->features &= ~NETIF_F_TSO;
3371 if (!(netdev->wanted_features & NETIF_F_TSO6) ||
3373 netdev->features &= ~NETIF_F_TSO6;
3374 if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
3375 netdev->features &= ~NETIF_F_TSO_ECN;
3376 if (!(netdev->wanted_features & NETIF_F_GRO))
3377 netdev->features &= ~NETIF_F_GRO;
3378 if (!(netdev->wanted_features & NETIF_F_GSO))
3379 netdev->features &= ~NETIF_F_GSO;
3382 adapter->vsi.id = adapter->vsi_res->vsi_id;
3384 adapter->vsi.back = adapter;
3385 adapter->vsi.base_vector = 1;
3386 adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
3387 vsi->netdev = adapter->netdev;
3388 vsi->qs_handle = adapter->vsi_res->qset_handle;
3389 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
3390 adapter->rss_key_size = vfres->rss_key_size;
3391 adapter->rss_lut_size = vfres->rss_lut_size;
3393 adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
3394 adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
3401 * i40evf_init_task - worker thread to perform delayed initialization
3402 * @work: pointer to work_struct containing our data
3404 * This task completes the work that was begun in probe. Due to the nature
3405 * of VF-PF communications, we may need to wait tens of milliseconds to get
3406 * responses back from the PF. Rather than busy-wait in probe and bog down the
3407 * whole system, we'll do it in a task so we can sleep.
3408 * This task only runs during driver init. Once we've established
3409 * communications with the PF driver and set up our netdev, the watchdog takes over.
3412 static void i40evf_init_task(struct work_struct *work)
3414 struct i40evf_adapter *adapter = container_of(work,
3415 struct i40evf_adapter,
3417 struct net_device *netdev = adapter->netdev;
3418 struct i40e_hw *hw = &adapter->hw;
3419 struct pci_dev *pdev = adapter->pdev;
3422 switch (adapter->state) {
3423 case __I40EVF_STARTUP:
3424 /* driver loaded, probe complete */
3425 adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
3426 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
3427 err = i40e_set_mac_type(hw);
3429 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
3433 err = i40evf_check_reset_complete(hw);
3435 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
3439 hw->aq.num_arq_entries = I40EVF_AQ_LEN;
3440 hw->aq.num_asq_entries = I40EVF_AQ_LEN;
3441 hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
3442 hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
3444 err = i40evf_init_adminq(hw);
3446 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
3450 err = i40evf_send_api_ver(adapter);
3452 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
3453 i40evf_shutdown_adminq(hw);
3456 adapter->state = __I40EVF_INIT_VERSION_CHECK;
3458 case __I40EVF_INIT_VERSION_CHECK:
3459 if (!i40evf_asq_done(hw)) {
3460 dev_err(&pdev->dev, "Admin queue command never completed\n");
3461 i40evf_shutdown_adminq(hw);
3462 adapter->state = __I40EVF_STARTUP;
3466 /* aq msg sent, awaiting reply */
3467 err = i40evf_verify_api_ver(adapter);
3469 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
3470 err = i40evf_send_api_ver(adapter);
3472 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
3473 adapter->pf_version.major,
3474 adapter->pf_version.minor,
3475 VIRTCHNL_VERSION_MAJOR,
3476 VIRTCHNL_VERSION_MINOR);
3479 err = i40evf_send_vf_config_msg(adapter);
3481 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
3485 adapter->state = __I40EVF_INIT_GET_RESOURCES;
3487 case __I40EVF_INIT_GET_RESOURCES:
3488 /* aq msg sent, awaiting reply */
3489 if (!adapter->vf_res) {
3490 bufsz = sizeof(struct virtchnl_vf_resource) +
3492 sizeof(struct virtchnl_vsi_resource));
3493 adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
3494 if (!adapter->vf_res)
3497 err = i40evf_get_vf_config(adapter);
3498 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
3499 err = i40evf_send_vf_config_msg(adapter);
3501 } else if (err == I40E_ERR_PARAM) {
3502 /* We only get ERR_PARAM if the device is in a very bad
3503 * state or if we've been disabled for previous bad
3504 * behavior. Either way, we're done now.
3506 i40evf_shutdown_adminq(hw);
3507 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
3511 dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
3515 adapter->state = __I40EVF_INIT_SW;
3521 if (i40evf_process_config(adapter))
3523 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3525 adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
3527 netdev->netdev_ops = &i40evf_netdev_ops;
3528 i40evf_set_ethtool_ops(netdev);
3529 netdev->watchdog_timeo = 5 * HZ;
3531 /* MTU range: 68 - 9710 */
3532 netdev->min_mtu = ETH_MIN_MTU;
3533 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
3535 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
3536 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
3537 adapter->hw.mac.addr);
3538 eth_hw_addr_random(netdev);
3539 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
3541 adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF;
3542 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
3543 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
3546 timer_setup(&adapter->watchdog_timer, i40evf_watchdog_timer, 0);
3547 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3549 adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
3550 adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
3551 err = i40evf_init_interrupt_scheme(adapter);
3554 i40evf_map_rings_to_vectors(adapter);
3555 if (adapter->vf_res->vf_cap_flags &
3556 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
3557 adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
3559 err = i40evf_request_misc_irq(adapter);
3563 netif_carrier_off(netdev);
3564 adapter->link_up = false;
3566 if (!adapter->netdev_registered) {
3567 err = register_netdev(netdev);
3572 adapter->netdev_registered = true;
3574 netif_tx_stop_all_queues(netdev);
3575 if (CLIENT_ALLOWED(adapter)) {
3576 err = i40evf_lan_add_device(adapter);
3578 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
3582 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
3583 if (netdev->features & NETIF_F_GRO)
3584 dev_info(&pdev->dev, "GRO is enabled\n");
3586 adapter->state = __I40EVF_DOWN;
3587 set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
3588 i40evf_misc_irq_enable(adapter);
3589 wake_up(&adapter->down_waitqueue);
3591 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
3592 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
3593 if (!adapter->rss_key || !adapter->rss_lut)
3596 if (RSS_AQ(adapter)) {
3597 adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
3598 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
3600 i40evf_init_rss(adapter);
3604 schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
3607 i40evf_free_rss(adapter);
3609 i40evf_free_misc_irq(adapter);
3611 i40evf_reset_interrupt_capability(adapter);
3613 kfree(adapter->vf_res);
3614 adapter->vf_res = NULL;
3616 /* Things went into the weeds, so try again later */
3617 if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
3618 dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
3619 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
3620 i40evf_shutdown_adminq(hw);
3621 adapter->state = __I40EVF_STARTUP;
3622 schedule_delayed_work(&adapter->init_task, HZ * 5);
3625 schedule_delayed_work(&adapter->init_task, HZ);
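/* State flow of the init task above (a summary, not code):
 *
 *	__I40EVF_STARTUP                 reset check, AQ init, send API ver
 *	-> __I40EVF_INIT_VERSION_CHECK   verify PF API, request VF config
 *	-> __I40EVF_INIT_GET_RESOURCES   receive and process VF resources
 *	-> __I40EVF_INIT_SW              netdev, IRQ, and RSS setup
 *	-> __I40EVF_DOWN                 ready; watchdog takes over
 *
 * Failures fall through to the error labels at the end of the task;
 * after I40EVF_AQ_MAX_ERR consecutive failures it backs off for five
 * seconds (HZ * 5) and restarts from __I40EVF_STARTUP.
 */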
3629 * i40evf_shutdown - Shutdown the device in preparation for a reboot
3630 * @pdev: pci device structure
3632 static void i40evf_shutdown(struct pci_dev *pdev)
3634 struct net_device *netdev = pci_get_drvdata(pdev);
3635 struct i40evf_adapter *adapter = netdev_priv(netdev);
3637 netif_device_detach(netdev);
3639 if (netif_running(netdev))
3640 i40evf_close(netdev);
3642 /* Prevent the watchdog from running. */
3643 adapter->state = __I40EVF_REMOVE;
3644 adapter->aq_required = 0;
3647 pci_save_state(pdev);
3650 pci_disable_device(pdev);
3654 * i40evf_probe - Device Initialization Routine
3655 * @pdev: PCI device information struct
3656 * @ent: entry in i40evf_pci_tbl
3658 * Returns 0 on success, negative on failure
3660 * i40evf_probe initializes an adapter identified by a pci_dev structure.
3661 * The OS initialization, configuring of the adapter private structure,
3662 * and a hardware reset occur.
3664 static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3666 struct net_device *netdev;
3667 struct i40evf_adapter *adapter = NULL;
3668 struct i40e_hw *hw = NULL;
3671 err = pci_enable_device(pdev);
3675 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3677 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3680 "DMA configuration failed: 0x%x\n", err);
3685 err = pci_request_regions(pdev, i40evf_driver_name);
3688 "pci_request_regions failed 0x%x\n", err);
3692 pci_enable_pcie_error_reporting(pdev);
3694 pci_set_master(pdev);
3696 netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter),
3697 I40EVF_MAX_REQ_QUEUES);
3700 goto err_alloc_etherdev;
3703 SET_NETDEV_DEV(netdev, &pdev->dev);
3705 pci_set_drvdata(pdev, netdev);
3706 adapter = netdev_priv(netdev);
3708 adapter->netdev = netdev;
3709 adapter->pdev = pdev;
3714 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3715 adapter->state = __I40EVF_STARTUP;
3717 /* Call save state here because it relies on the adapter struct. */
3718 pci_save_state(pdev);
3720 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3721 pci_resource_len(pdev, 0));
3726 hw->vendor_id = pdev->vendor;
3727 hw->device_id = pdev->device;
3728 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
3729 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3730 hw->subsystem_device_id = pdev->subsystem_device;
3731 hw->bus.device = PCI_SLOT(pdev->devfn);
3732 hw->bus.func = PCI_FUNC(pdev->devfn);
3733 hw->bus.bus_id = pdev->bus->number;
3735 /* set up the locks for the AQ, do this only once in probe
3736 * and destroy them only once in remove
3738 mutex_init(&hw->aq.asq_mutex);
3739 mutex_init(&hw->aq.arq_mutex);
3741 spin_lock_init(&adapter->mac_vlan_list_lock);
3742 spin_lock_init(&adapter->cloud_filter_list_lock);
3744 INIT_LIST_HEAD(&adapter->mac_filter_list);
3745 INIT_LIST_HEAD(&adapter->vlan_filter_list);
3746 INIT_LIST_HEAD(&adapter->cloud_filter_list);
3748 INIT_WORK(&adapter->reset_task, i40evf_reset_task);
3749 INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
3750 INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
3751 INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
3752 INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
3753 schedule_delayed_work(&adapter->init_task,
3754 msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
3756 /* Setup the wait queue for indicating transition to down status */
3757 init_waitqueue_head(&adapter->down_waitqueue);
3762 free_netdev(netdev);
3764 pci_release_regions(pdev);
3767 pci_disable_device(pdev);
3773 * i40evf_suspend - Power management suspend routine
3774 * @pdev: PCI device information struct
3777 * Called when the system (VM) is entering sleep/suspend.
3779 static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
3781 struct net_device *netdev = pci_get_drvdata(pdev);
3782 struct i40evf_adapter *adapter = netdev_priv(netdev);
3785 netif_device_detach(netdev);
3787 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
3788 &adapter->crit_section))
3789 usleep_range(500, 1000);
3791 if (netif_running(netdev)) {
3793 i40evf_down(adapter);
3796 i40evf_free_misc_irq(adapter);
3797 i40evf_reset_interrupt_capability(adapter);
3799 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
3801 retval = pci_save_state(pdev);
3805 pci_disable_device(pdev);
3811 * i40evf_resume - Power management resume routine
3812 * @pdev: PCI device information struct
3814 * Called when the system (VM) is resumed from sleep/suspend.
3816 static int i40evf_resume(struct pci_dev *pdev)
3818 struct i40evf_adapter *adapter = pci_get_drvdata(pdev);
3819 struct net_device *netdev = adapter->netdev;
3822 pci_set_power_state(pdev, PCI_D0);
3823 pci_restore_state(pdev);
3824 /* pci_restore_state clears dev->state_saved so call
3825 * pci_save_state to restore it.
3827 pci_save_state(pdev);
3829 err = pci_enable_device_mem(pdev);
3831 dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
3834 pci_set_master(pdev);
3837 err = i40evf_set_interrupt_capability(adapter);
3840 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
3843 err = i40evf_request_misc_irq(adapter);
3846 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
3850 schedule_work(&adapter->reset_task);
3852 netif_device_attach(netdev);
3857 #endif /* CONFIG_PM */
3859 * i40evf_remove - Device Removal Routine
3860 * @pdev: PCI device information struct
3862 * i40evf_remove is called by the PCI subsystem to alert the driver
3863 * that it should release a PCI device. This could be caused by a
3864 * Hot-Plug event, or because the driver is going to be removed from memory.
3867 static void i40evf_remove(struct pci_dev *pdev)
3869 struct net_device *netdev = pci_get_drvdata(pdev);
3870 struct i40evf_adapter *adapter = netdev_priv(netdev);
3871 struct i40evf_vlan_filter *vlf, *vlftmp;
3872 struct i40evf_mac_filter *f, *ftmp;
3873 struct i40evf_cloud_filter *cf, *cftmp;
3874 struct i40e_hw *hw = &adapter->hw;
3876 /* Indicate we are in remove and not to run reset_task */
3877 set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section);
3878 cancel_delayed_work_sync(&adapter->init_task);
3879 cancel_work_sync(&adapter->reset_task);
3880 cancel_delayed_work_sync(&adapter->client_task);
3881 if (adapter->netdev_registered) {
3882 unregister_netdev(netdev);
3883 adapter->netdev_registered = false;
3885 if (CLIENT_ALLOWED(adapter)) {
3886 err = i40evf_lan_del_device(adapter);
3888 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
3892 /* Shut down all the garbage mashers on the detention level */
3893 adapter->state = __I40EVF_REMOVE;
3894 adapter->aq_required = 0;
3895 adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
3896 i40evf_request_reset(adapter);
3898 /* If the FW isn't responding, kick it once, but only once. */
3899 if (!i40evf_asq_done(hw)) {
3900 i40evf_request_reset(adapter);
3903 i40evf_free_all_tx_resources(adapter);
3904 i40evf_free_all_rx_resources(adapter);
3905 i40evf_misc_irq_disable(adapter);
3906 i40evf_free_misc_irq(adapter);
3907 i40evf_reset_interrupt_capability(adapter);
3908 i40evf_free_q_vectors(adapter);
3910 if (adapter->watchdog_timer.function)
3911 del_timer_sync(&adapter->watchdog_timer);
3913 cancel_work_sync(&adapter->adminq_task);
3915 i40evf_free_rss(adapter);
3917 if (hw->aq.asq.count)
3918 i40evf_shutdown_adminq(hw);
3920 /* destroy the locks only once, here */
3921 mutex_destroy(&hw->aq.arq_mutex);
3922 mutex_destroy(&hw->aq.asq_mutex);
3924 iounmap(hw->hw_addr);
3925 pci_release_regions(pdev);
3926 i40evf_free_all_tx_resources(adapter);
3927 i40evf_free_all_rx_resources(adapter);
3928 i40evf_free_queues(adapter);
3929 kfree(adapter->vf_res);
3930 spin_lock_bh(&adapter->mac_vlan_list_lock);
3931 /* If we got removed before an up/down sequence, we've got a filter
3932 * hanging out there that we need to get rid of.
3934 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3938 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
3940 list_del(&vlf->list);
3944 spin_unlock_bh(&adapter->mac_vlan_list_lock);
3946 spin_lock_bh(&adapter->cloud_filter_list_lock);
3947 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
3948 list_del(&cf->list);
3951 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3953 free_netdev(netdev);
3955 pci_disable_pcie_error_reporting(pdev);
3957 pci_disable_device(pdev);
3960 static struct pci_driver i40evf_driver = {
3961 .name = i40evf_driver_name,
3962 .id_table = i40evf_pci_tbl,
3963 .probe = i40evf_probe,
3964 .remove = i40evf_remove,
3966 .suspend = i40evf_suspend,
3967 .resume = i40evf_resume,
3969 .shutdown = i40evf_shutdown,
3973 * i40evf_init_module - Driver Registration Routine
3975 * i40evf_init_module is the first routine called when the driver is
3976 * loaded. All it does is register with the PCI subsystem.
3978 static int __init i40evf_init_module(void)
3982 pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
3983 i40evf_driver_version);
3985 pr_info("%s\n", i40evf_copyright);
3987 i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
3988 i40evf_driver_name);
3990 pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
3993 ret = pci_register_driver(&i40evf_driver);
3997 module_init(i40evf_init_module);
4000 * i40evf_exit_module - Driver Exit Cleanup Routine
4002 * i40evf_exit_module is called just before the driver is removed from memory.
4005 static void __exit i40evf_exit_module(void)
4007 pci_unregister_driver(&i40evf_driver);
4008 destroy_workqueue(i40evf_wq);
4011 module_exit(i40evf_exit_module);