// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>
#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include "i40e_xsk.h"
#include <net/udp_tunnel.h>
#include <net/xdp_sock_drv.h>
/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf);
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
static bool i40e_check_recovery_mode(struct i40e_pf *pf);
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);
static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
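/* Example (illustrative): "modprobe i40e debug=16" selects the highest
 * message level, while a value with the top bit set, e.g.
 * "modprobe i40e debug=0x80000010", is treated as a debug mask per the
 * 0x8XXXXXXX convention in the parameter description above.
 */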
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");

static struct workqueue_struct *i40e_wq;
/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}
/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;
	return 0;
}
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%s needed=%d id=0x%04x\n",
			 pile ? "<valid>" : "<null>", needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
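/* Example usage (illustrative call sites; the real callers live later
 * in this file): a VSI reserving its queue pairs from the PF-wide pile
 * would do
 *	base = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
 * and hand them back on teardown with
 *	i40e_put_lump(pf->qp_pile, base, vsi->idx);
 */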
/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}
/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
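	/* Run the task when the PF is up, or unconditionally while in
	 * recovery mode; skip it while the PF is down or a reset recovery
	 * is still pending.
	 */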
	if ((!test_bit(__I40E_DOWN, pf->state) &&
	     !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
	      test_bit(__I40E_RECOVERY_MODE, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number timing out
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i;
	u32 head, val;

	pf->tx_timeout_count++;

	/* with txqueue index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (txqueue ==
			    vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	/* don't kick off another recovery if one is already pending */
	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
		return;

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
		    pf->tx_timeout_recovery_level, txqueue);
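	/* Escalate one step per timeout: PF reset first, then core reset,
	 * then global reset; anything past level 3 only logs a failure.
	 */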
	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;
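	/* Snapshot the packet/byte pair under the u64_stats seqcount so a
	 * read torn by a concurrent writer (possible on 32-bit kernels,
	 * where the 64-bit counters are not atomic) is retried.
	 */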
	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes += bytes;
}
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	struct i40e_ring *ring;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		ring = READ_ONCE(vsi->tx_rings[i]);
		if (!ring)
			continue;
		i40e_get_netdev_stats_struct_tx(ring, stats);

		if (i40e_enabled_xdp_vsi(vsi)) {
			ring = READ_ONCE(vsi->xdp_rings[i]);
			if (!ring)
				continue;
			i40e_get_netdev_stats_struct_tx(ring, stats);
		}

		ring = READ_ONCE(vsi->rx_rings[i]);
		if (!ring)
			continue;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;
}
/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}
/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			memset(&pf->veb[i]->tc_stats, 0,
			       sizeof(pf->veb[i]->tc_stats));
			memset(&pf->veb[i]->tc_stats_offsets, 0,
			       sizeof(pf->veb[i]->tc_stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero. In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
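/* Worked example (illustrative values): if the first read captured
 *	*offset  = 0xFFFFFFFFFF00
 * and the 48-bit counter has since wrapped to
 *	new_data = 0x000000000010,
 * then (new_data + BIT_ULL(48)) - *offset = 0x110 units since load,
 * masked back into 48 bits by the final statement above.
 */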
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs. This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications. We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;	/* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;	/* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* locate Rx ring */
		p = READ_ONCE(vsi->rx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;

		if (i40e_enabled_xdp_vsi(vsi)) {
			/* locate XDP ring */
			p = READ_ONCE(vsi->xdp_rings[q]);
			if (!p)
				continue;

			do {
				start = u64_stats_fetch_begin_irq(&p->syncp);
				packets = p->stats.packets;
				bytes = p->stats.bytes;
			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
			tx_b += bytes;
			tx_p += packets;
			tx_restart += p->tx_stats.restart_queue;
			tx_busy += p->tx_stats.tx_busy;
			tx_linearize += p->tx_stats.tx_linearize;
			tx_force_wb += p->tx_stats.tx_force_wb;
		}
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}
/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}
/**
 * i40e_count_filters - counts VSI mac filters
 * @vsi: the VSI to be searched
 *
 * Returns count of mac filters
 **/
int i40e_count_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt, cnt = 0;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		++cnt;
	return cnt;
}
/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}
/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}
/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ALL. We could check the table each
	 * time, incurring search cost repeatedly. However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filters which we
	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
	 * we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search
	 * filters here and can perform the check at the same time. This
	 * results in avoiding embedding a loop for VLAN mode inside another
	 * loop over all the filters, and should maintain correctness as noted
	 * above.
	 */
	return vsi->has_vlan_filter;
}
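/* Note: vsi->has_vlan_filter is set in i40e_add_filter when a filter
 * with vlan >= 0 is added, and re-derived from the count of active
 * VLAN filters in i40e_correct_mac_vlan_filters below.
 */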
/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 **/
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */
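	/* For example, with no PVID and vlan_filters > 0, a filter stored
	 * as VLAN=-1 (match any) is re-added as VLAN=0 so it matches only
	 * untagged frames; when the last VLAN filter is removed, the swap
	 * is reversed and the filter returns to VLAN=-1.
	 */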
	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}
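/* Filter life cycle, as used above and in __i40e_del_filter below:
 * entries start as NEW, become ACTIVE or FAILED once the sync task has
 * reported them to firmware, and are marked REMOVE when slated for
 * deletion; only NEW/FAILED entries may be freed without a firmware
 * round trip.
 */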
/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}
/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}
/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}
/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	lockdep_assert_held(&vsi->mac_filter_hash_lock);
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* Copy the address first, so that we avoid a possible race with
	 * .set_rx_mode().
	 * - Remove old address from MAC filter
	 * - Copy new address
	 * - Add new address to MAC filter
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	i40e_add_mac_filter(vsi, netdev->dev_addr);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(pf);
	return 0;
}
/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table of lut_size
 * @lut_size: size of the lookup table
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}
/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
	return ret;
}
/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured,
 * @ctxt: VSI context structure
 * @enabled_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
					   struct i40e_vsi_context *ctxt,
					   u8 enabled_tc)
{
	u16 qcount = 0, max_qcount, qmap, sections = 0;
	int i, override_q, pow, num_qps, ret;
	u8 netdev_tc = 0, offset = 0;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	num_qps = vsi->mqprio_qopt.qopt.count[0];

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(num_qps);
	if (!is_power_of_2(num_qps))
		pow++;
	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
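	/* e.g. count[0] == 6: ilog2(6) == 2 and 6 is not a power of two,
	 * so pow becomes 3 and the qmap encodes a TC sized for 8 queues.
	 */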
	/* Setup queue offset/count for all TCs for given VSI */
	max_qcount = vsi->mqprio_qopt.qopt.count[0];
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount = vsi->mqprio_qopt.qopt.count[i];
			if (qcount > max_qcount)
				max_qcount = qcount;
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;
		}
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset + qcount;

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);

	/* Reconfigure RSS for main VSI with max queue count */
	vsi->rss_size = max_qcount;
	ret = i40e_vsi_config_rss(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to reconfig rss for num_queues (%u)\n",
			 max_qcount);
		return ret;
	}
	vsi->reconfig_rss = true;
	dev_dbg(&vsi->back->pdev->dev,
		"Reconfigured rss with num_queues (%u)\n", max_qcount);

	/* Find queue count available for channel VSIs and starting offset
	 * for channel VSIs
	 */
	override_q = vsi->mqprio_qopt.qopt.count[0];
	if (override_q && override_q < vsi->num_queue_pairs) {
		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
		vsi->next_base_queue = override_q;
	}
	return 0;
}
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 num_tc_qps = 0;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 1;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	/* Number of queues per enabled TC */
	num_tc_qps = vsi->alloc_queue_pairs;
	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
		num_tc_qps = num_tc_qps / numtc;
		num_tc_qps = min_t(int, num_tc_qps,
				   i40e_pf_get_max_q_per_tc(pf));
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

	/* Do not allow using more TC queue pairs than there are MSI-X vectors */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
				    I40E_FLAG_FD_ATR_ENABLED)) ||
				    vsi->tc_config.enabled_tc != 1) {
					qcount = min_t(int, pf->alloc_rss_size,
						       num_tc_qps);
					break;
				}
				fallthrough;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (i40e_add_mac_filter(vsi, addr))
		return 0;
	else
		return -ENOMEM;
}
/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	i40e_del_mac_filter(vsi, addr);

	return 0;
}
1959 * i40e_set_rx_mode - NDO callback to set the netdev filters
1960 * @netdev: network interface device structure
1962 static void i40e_set_rx_mode(struct net_device *netdev)
1964 struct i40e_netdev_priv *np = netdev_priv(netdev);
1965 struct i40e_vsi *vsi = np->vsi;
1967 spin_lock_bh(&vsi->mac_filter_hash_lock);
1969 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1970 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1972 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1974 /* check for other flag changes */
1975 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1976 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1977 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
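/* The AdminQ work itself is deferred: flagging the VSI and setting
 * __I40E_MACVLAN_SYNC_PENDING lets the service task run
 * i40e_sync_vsi_filters() later, since .set_rx_mode executes in atomic
 * context and cannot sleep.
 */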
1982 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1983 * @vsi: Pointer to VSI struct
1984 * @from: Pointer to list which contains MAC filter entries - changes to
1985 * those entries need to be undone.
1987 * MAC filter entries from this list were slated for deletion.
1989 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1990 struct hlist_head *from)
1992 struct i40e_mac_filter *f;
1993 struct hlist_node *h;
1995 hlist_for_each_entry_safe(f, h, from, hlist) {
1996 u64 key = i40e_addr_to_hkey(f->macaddr);
1998 /* Move the element back into the MAC filter list */
1999 hlist_del(&f->hlist);
2000 hash_add(vsi->mac_filter_hash, &f->hlist, key);
2005 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
2006 * @vsi: Pointer to VSI struct
2007 * @from: Pointer to list which contains MAC filter entries - changes to
2008 * those entries need to be undone.
2010 * MAC filter entries from this list were slated for addition.
2012 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
2013 struct hlist_head *from)
2015 struct i40e_new_mac_filter *new;
2016 struct hlist_node *h;
2018 hlist_for_each_entry_safe(new, h, from, hlist) {
2019 /* We can simply free the wrapper structure */
2020 hlist_del(&new->hlist);
2026 * i40e_next_filter - Get the next non-broadcast filter from a list
2027 * @next: pointer to filter in list
2029 * Returns the next non-broadcast filter in the list. Required so that we
2030 * ignore broadcast filters within the list, since these are not handled via
2031 * the normal firmware update path.
2034 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2036 hlist_for_each_entry_continue(next, hlist) {
2037 if (!is_broadcast_ether_addr(next->f->macaddr))
2045 * i40e_update_filter_state - Update filter state based on return data
2047 * @count: Number of filters added
2048 * @add_list: return data from fw
2049 * @add_head: pointer to first filter in current batch
2051 * MAC filter entries from the list were slated to be added to the device.
2052 * Returns the number of successful filters. Note that 0 does NOT mean success!
2055 i40e_update_filter_state(int count,
2056 struct i40e_aqc_add_macvlan_element_data *add_list,
2057 struct i40e_new_mac_filter *add_head)
2062 for (i = 0; i < count; i++) {
2063 /* Always check status of each filter. We don't need to check
2064 * the firmware return status because we pre-set the filter
2065 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2066 * request to the adminq. Thus, if it no longer matches then
2067 * we know the filter is active.
2069 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2070 add_head->state = I40E_FILTER_FAILED;
2072 add_head->state = I40E_FILTER_ACTIVE;
2076 add_head = i40e_next_filter(add_head);
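/* Because i40e_next_filter() skips broadcast entries, add_head stays
 * aligned with the firmware reply array, which never contains broadcast
 * filters (those are handled via the promiscuous flags instead).
 */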
2085 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2086 * @vsi: ptr to the VSI
2087 * @vsi_name: name to display in messages
2088 * @list: the list of filters to send to firmware
2089 * @num_del: the number of filters to delete
2090 * @retval: Set to -EIO on failure to delete
2092 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2093 * *retval instead of a return value so that success does not force *retval to
2094 * be set to 0. This ensures that a sequence of calls to this function
2095 * preserves the previous value of *retval on successful delete.
2098 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2099 struct i40e_aqc_remove_macvlan_element_data *list,
2100 int num_del, int *retval)
2102 struct i40e_hw *hw = &vsi->back->hw;
2106 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
2107 aq_err = hw->aq.asq_last_status;
2109 /* Explicitly ignore and do not report when firmware returns ENOENT */
2110 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
2112 dev_info(&vsi->back->pdev->dev,
2113 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2114 vsi_name, i40e_stat_str(hw, aq_ret),
2115 i40e_aq_str(hw, aq_err));
2120 * i40e_aqc_add_filters - Request firmware to add a set of filters
2121 * @vsi: ptr to the VSI
2122 * @vsi_name: name to display in messages
2123 * @list: the list of filters to send to firmware
2124 * @add_head: Position in the add hlist
2125 * @num_add: the number of filters to add
2127 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2128 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2129 * space for more filters.
2132 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2133 struct i40e_aqc_add_macvlan_element_data *list,
2134 struct i40e_new_mac_filter *add_head,
2137 struct i40e_hw *hw = &vsi->back->hw;
2140 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
2141 aq_err = hw->aq.asq_last_status;
2142 fcnt = i40e_update_filter_state(num_add, list, add_head);
2144 if (fcnt != num_add) {
2145 if (vsi->type == I40E_VSI_MAIN) {
2146 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2147 dev_warn(&vsi->back->pdev->dev,
2148 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2149 i40e_aq_str(hw, aq_err), vsi_name);
2150 } else if (vsi->type == I40E_VSI_SRIOV ||
2151 vsi->type == I40E_VSI_VMDQ1 ||
2152 vsi->type == I40E_VSI_VMDQ2) {
2153 dev_warn(&vsi->back->pdev->dev,
2154 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2155 i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
2157 dev_warn(&vsi->back->pdev->dev,
2158 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2159 i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
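/* Rationale: when the shared filter table is full, forcing overflow
 * promiscuous on the main VSI keeps traffic flowing; for VF/VMDq VSIs the
 * driver can only warn, as promiscuous mode must be requested for those
 * VSIs separately.
 */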
2165 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2166 * @vsi: pointer to the VSI
2167 * @vsi_name: the VSI name
2170 * This function sets or clears the promiscuous broadcast flags for VLAN
2171 * filters in order to properly receive broadcast frames. Assumes that only
2172 * broadcast filters are passed.
2174 * Returns status indicating success or failure.
2177 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2178 struct i40e_mac_filter *f)
2180 bool enable = f->state == I40E_FILTER_NEW;
2181 struct i40e_hw *hw = &vsi->back->hw;
2184 if (f->vlan == I40E_VLAN_ANY) {
2185 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2190 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2198 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2199 dev_warn(&vsi->back->pdev->dev,
2200 "Error %s, forcing overflow promiscuous on %s\n",
2201 i40e_aq_str(hw, hw->aq.asq_last_status),
2209 * i40e_set_promiscuous - set promiscuous mode
2210 * @pf: board private structure
2211 * @promisc: promisc on or off
2213 * There are different ways of setting promiscuous mode on a PF depending on
2214 * what state/environment we're in. This identifies and sets it appropriately.
2215 * Returns 0 on success.
2217 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2219 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2220 struct i40e_hw *hw = &pf->hw;
2223 if (vsi->type == I40E_VSI_MAIN &&
2224 pf->lan_veb != I40E_NO_VEB &&
2225 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2226 /* set defport ON for Main VSI instead of true promisc
2227 * this way we will get all unicast/multicast and VLAN
2228 * promisc behavior but will not get VF or VMDq traffic
2229 * replicated on the Main VSI.
2232 aq_ret = i40e_aq_set_default_vsi(hw,
2236 aq_ret = i40e_aq_clear_default_vsi(hw,
2240 dev_info(&pf->pdev->dev,
2241 "Set default VSI failed, err %s, aq_err %s\n",
2242 i40e_stat_str(hw, aq_ret),
2243 i40e_aq_str(hw, hw->aq.asq_last_status));
2246 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2252 dev_info(&pf->pdev->dev,
2253 "set unicast promisc failed, err %s, aq_err %s\n",
2254 i40e_stat_str(hw, aq_ret),
2255 i40e_aq_str(hw, hw->aq.asq_last_status));
2257 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2262 dev_info(&pf->pdev->dev,
2263 "set multicast promisc failed, err %s, aq_err %s\n",
2264 i40e_stat_str(hw, aq_ret),
2265 i40e_aq_str(hw, hw->aq.asq_last_status));
2270 pf->cur_promisc = promisc;
2276 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2277 * @vsi: ptr to the VSI
2279 * Push any outstanding VSI filter changes through the AdminQ.
2281 * Returns 0 or error value
2283 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2285 struct hlist_head tmp_add_list, tmp_del_list;
2286 struct i40e_mac_filter *f;
2287 struct i40e_new_mac_filter *new, *add_head = NULL;
2288 struct i40e_hw *hw = &vsi->back->hw;
2289 bool old_overflow, new_overflow;
2290 unsigned int failed_filters = 0;
2291 unsigned int vlan_filters = 0;
2292 char vsi_name[16] = "PF";
2293 int filter_list_len = 0;
2294 i40e_status aq_ret = 0;
2295 u32 changed_flags = 0;
2296 struct hlist_node *h;
2305 /* empty array-typed pointers; buffers are kzalloc'd later */
2306 struct i40e_aqc_add_macvlan_element_data *add_list;
2307 struct i40e_aqc_remove_macvlan_element_data *del_list;
2309 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2310 usleep_range(1000, 2000);
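/* The test_and_set_bit()/usleep_range() pair above acts as a simple
 * sleeping lock: only one caller may sync a given VSI's filters at a time,
 * and contenders back off for 1-2 ms per attempt.
 */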
2313 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2316 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2317 vsi->current_netdev_flags = vsi->netdev->flags;
2320 INIT_HLIST_HEAD(&tmp_add_list);
2321 INIT_HLIST_HEAD(&tmp_del_list);
2323 if (vsi->type == I40E_VSI_SRIOV)
2324 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2325 else if (vsi->type != I40E_VSI_MAIN)
2326 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2328 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2329 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2331 spin_lock_bh(&vsi->mac_filter_hash_lock);
2332 /* Create a list of filters to delete. */
2333 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2334 if (f->state == I40E_FILTER_REMOVE) {
2335 /* Move the element into temporary del_list */
2336 hash_del(&f->hlist);
2337 hlist_add_head(&f->hlist, &tmp_del_list);
2339 /* Avoid counting removed filters */
2342 if (f->state == I40E_FILTER_NEW) {
2343 /* Create a temporary i40e_new_mac_filter */
2344 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2346 goto err_no_memory_locked;
2348 /* Store pointer to the real filter */
2350 new->state = f->state;
2352 /* Add it to the hash list */
2353 hlist_add_head(&new->hlist, &tmp_add_list);
2356 /* Count the number of active (current and new) VLAN
2357 * filters we have now. Does not count filters which
2358 * are marked for deletion.
2364 retval = i40e_correct_mac_vlan_filters(vsi,
2369 goto err_no_memory_locked;
2371 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2374 /* Now process 'del_list' outside the lock */
2375 if (!hlist_empty(&tmp_del_list)) {
2376 filter_list_len = hw->aq.asq_buf_size /
2377 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2378 list_size = filter_list_len *
2379 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2380 del_list = kzalloc(list_size, GFP_ATOMIC);
2384 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2387 /* handle broadcast filters by updating the broadcast
2388 * promiscuous flag and release filter list.
2390 if (is_broadcast_ether_addr(f->macaddr)) {
2391 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2393 hlist_del(&f->hlist);
2398 /* add to delete list */
2399 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2400 if (f->vlan == I40E_VLAN_ANY) {
2401 del_list[num_del].vlan_tag = 0;
2402 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2404 del_list[num_del].vlan_tag =
2405 cpu_to_le16((u16)(f->vlan));
2408 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2409 del_list[num_del].flags = cmd_flags;
2412 /* flush a full buffer */
2413 if (num_del == filter_list_len) {
2414 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2416 memset(del_list, 0, list_size);
2419 /* Release memory for MAC filter entries which were
2420 * synced up with HW.
2422 hlist_del(&f->hlist);
2427 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2435 if (!hlist_empty(&tmp_add_list)) {
2436 /* Do all the adds now. */
2437 filter_list_len = hw->aq.asq_buf_size /
2438 sizeof(struct i40e_aqc_add_macvlan_element_data);
2439 list_size = filter_list_len *
2440 sizeof(struct i40e_aqc_add_macvlan_element_data);
2441 add_list = kzalloc(list_size, GFP_ATOMIC);
2446 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2447 /* handle broadcast filters by updating the broadcast
2448 * promiscuous flag instead of adding a MAC filter.
2450 if (is_broadcast_ether_addr(new->f->macaddr)) {
2451 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2453 new->state = I40E_FILTER_FAILED;
2455 new->state = I40E_FILTER_ACTIVE;
2459 /* add to add array */
2463 ether_addr_copy(add_list[num_add].mac_addr,
2465 if (new->f->vlan == I40E_VLAN_ANY) {
2466 add_list[num_add].vlan_tag = 0;
2467 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2469 add_list[num_add].vlan_tag =
2470 cpu_to_le16((u16)(new->f->vlan));
2472 add_list[num_add].queue_number = 0;
2473 /* set invalid match method for later detection */
2474 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2475 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2476 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2479 /* flush a full buffer */
2480 if (num_add == filter_list_len) {
2481 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2483 memset(add_list, 0, list_size);
2488 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2491 /* Now move all of the filters from the temp add list back to
2492 * the VSI's list.
2494 spin_lock_bh(&vsi->mac_filter_hash_lock);
2495 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2496 /* Only update the state if we're still NEW */
2497 if (new->f->state == I40E_FILTER_NEW)
2498 new->f->state = new->state;
2499 hlist_del(&new->hlist);
2502 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2507 /* Determine the number of active and failed filters. */
2508 spin_lock_bh(&vsi->mac_filter_hash_lock);
2509 vsi->active_filters = 0;
2510 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2511 if (f->state == I40E_FILTER_ACTIVE)
2512 vsi->active_filters++;
2513 else if (f->state == I40E_FILTER_FAILED)
2516 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2518 /* Check if we are able to exit overflow promiscuous mode. We can
2519 * safely exit if we didn't just enter, we no longer have any failed
2520 * filters, and we have reduced filters below the threshold value.
2522 if (old_overflow && !failed_filters &&
2523 vsi->active_filters < vsi->promisc_threshold) {
2524 dev_info(&pf->pdev->dev,
2525 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2527 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2528 vsi->promisc_threshold = 0;
2531 /* if the VF is not trusted do not do promisc */
2532 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2533 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2537 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2539 /* If we are entering overflow promiscuous, we need to calculate a new
2540 * threshold for when we are safe to exit
2542 if (!old_overflow && new_overflow)
2543 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
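/* Worked example: entering overflow promiscuous with 100 active filters
 * sets the threshold to 75, so the exit check above fires only once no
 * filters have failed and fewer than 75 remain active.
 */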
2545 /* check for changes in promiscuous modes */
2546 if (changed_flags & IFF_ALLMULTI) {
2547 bool cur_multipromisc;
2549 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2550 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2555 retval = i40e_aq_rc_to_posix(aq_ret,
2556 hw->aq.asq_last_status);
2557 dev_info(&pf->pdev->dev,
2558 "set multi promisc failed on %s, err %s aq_err %s\n",
2560 i40e_stat_str(hw, aq_ret),
2561 i40e_aq_str(hw, hw->aq.asq_last_status));
2563 dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
2565 cur_multipromisc ? "entering" : "leaving");
2569 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2572 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2574 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2576 retval = i40e_aq_rc_to_posix(aq_ret,
2577 hw->aq.asq_last_status);
2578 dev_info(&pf->pdev->dev,
2579 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2580 cur_promisc ? "on" : "off",
2582 i40e_stat_str(hw, aq_ret),
2583 i40e_aq_str(hw, hw->aq.asq_last_status));
2587 /* if something went wrong then set the changed flag so we try again */
2589 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2591 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2595 /* Restore elements on the temporary add and delete lists */
2596 spin_lock_bh(&vsi->mac_filter_hash_lock);
2597 err_no_memory_locked:
2598 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2599 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2600 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2602 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2603 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2608 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2609 * @pf: board private structure
2611 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2617 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2619 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
2620 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2624 for (v = 0; v < pf->num_alloc_vsi; v++) {
2626 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2627 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2630 /* come back and try again later */
2631 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2637 clear_bit(__I40E_VF_DISABLE, pf->state);
2641 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2644 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2646 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2647 return I40E_RXBUFFER_2048;
2649 return I40E_RXBUFFER_3072;
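/* XDP on this driver requires each frame to fit in a single Rx buffer, so
 * the MTU ceiling follows directly from the Rx buffer size selected in
 * i40e_vsi_configure_rx() under the same page-size/legacy-rx conditions.
 */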
2653 * i40e_change_mtu - NDO callback to change the Maximum Transmission Unit
2654 * @netdev: network interface device structure
2655 * @new_mtu: new value for maximum frame size
2657 * Returns 0 on success, negative on failure
2659 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2661 struct i40e_netdev_priv *np = netdev_priv(netdev);
2662 struct i40e_vsi *vsi = np->vsi;
2663 struct i40e_pf *pf = vsi->back;
2665 if (i40e_enabled_xdp_vsi(vsi)) {
2666 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2668 if (frame_size > i40e_max_xdp_frame_size(vsi))
2672 netdev_dbg(netdev, "changing MTU from %d to %d\n",
2673 netdev->mtu, new_mtu);
2674 netdev->mtu = new_mtu;
2675 if (netif_running(netdev))
2676 i40e_vsi_reinit_locked(vsi);
2677 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2678 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2683 * i40e_ioctl - Access the hwtstamp interface
2684 * @netdev: network interface device structure
2685 * @ifr: interface request data
2686 * @cmd: ioctl command
2688 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2690 struct i40e_netdev_priv *np = netdev_priv(netdev);
2691 struct i40e_pf *pf = np->vsi->back;
2695 return i40e_ptp_get_ts_config(pf, ifr);
2697 return i40e_ptp_set_ts_config(pf, ifr);
2704 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2705 * @vsi: the vsi being adjusted
2707 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2709 struct i40e_vsi_context ctxt;
2712 /* Don't modify stripping options if a port VLAN is active */
2716 if ((vsi->info.valid_sections &
2717 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2718 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2719 return; /* already enabled */
2721 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2722 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2723 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2725 ctxt.seid = vsi->seid;
2726 ctxt.info = vsi->info;
2727 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2729 dev_info(&vsi->back->pdev->dev,
2730 "update vlan stripping failed, err %s aq_err %s\n",
2731 i40e_stat_str(&vsi->back->hw, ret),
2732 i40e_aq_str(&vsi->back->hw,
2733 vsi->back->hw.aq.asq_last_status));
2738 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2739 * @vsi: the vsi being adjusted
2741 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2743 struct i40e_vsi_context ctxt;
2746 /* Don't modify stripping options if a port VLAN is active */
2750 if ((vsi->info.valid_sections &
2751 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2752 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2753 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2754 return; /* already disabled */
2756 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2757 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2758 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2760 ctxt.seid = vsi->seid;
2761 ctxt.info = vsi->info;
2762 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2764 dev_info(&vsi->back->pdev->dev,
2765 "update vlan stripping failed, err %s aq_err %s\n",
2766 i40e_stat_str(&vsi->back->hw, ret),
2767 i40e_aq_str(&vsi->back->hw,
2768 vsi->back->hw.aq.asq_last_status));
2773 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2774 * @vsi: the vsi being configured
2775 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2777 * This is a helper function for adding a new MAC/VLAN filter with the
2778 * specified VLAN for each existing MAC address already in the hash table.
2779 * This function does *not* perform any accounting to update filters based on
2780 * VLAN mode.
2782 * NOTE: this function expects to be called while under the
2783 * mac_filter_hash_lock
2785 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2787 struct i40e_mac_filter *f, *add_f;
2788 struct hlist_node *h;
2791 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2792 if (f->state == I40E_FILTER_REMOVE)
2794 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2796 dev_info(&vsi->back->pdev->dev,
2797 "Could not add vlan filter %d for %pM\n",
2807 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2808 * @vsi: the VSI being configured
2809 * @vid: VLAN id to be added
2811 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2818 /* The network stack will attempt to add VID=0, with the intention to
2819 * receive priority tagged packets with a VLAN of 0. Our HW receives
2820 * these packets by default when configured to receive untagged
2821 * packets, so we don't need to add a filter for this case.
2822 * Additionally, HW interprets adding a VID=0 filter as meaning to
2823 * receive *only* tagged traffic and stops receiving untagged traffic.
2824 * Thus, we do not want to actually add a filter for VID=0
2829 /* Locked once because all functions invoked below iterate the list */
2830 spin_lock_bh(&vsi->mac_filter_hash_lock);
2831 err = i40e_add_vlan_all_mac(vsi, vid);
2832 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2836 /* schedule our worker thread which will take care of
2837 * applying the new filter changes
2839 i40e_service_event_schedule(vsi->back);
2844 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2845 * @vsi: the vsi being configured
2846 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2848 * This function should be used to remove all VLAN filters which match the
2849 * given VID. It does not schedule the service event and does not take the
2850 * mac_filter_hash_lock so it may be combined with other operations under
2851 * a single invocation of the mac_filter_hash_lock.
2853 * NOTE: this function expects to be called while under the
2854 * mac_filter_hash_lock
2856 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2858 struct i40e_mac_filter *f;
2859 struct hlist_node *h;
2862 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2864 __i40e_del_filter(vsi, f);
2869 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2870 * @vsi: the VSI being configured
2871 * @vid: VLAN id to be removed
2873 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2875 if (!vid || vsi->info.pvid)
2878 spin_lock_bh(&vsi->mac_filter_hash_lock);
2879 i40e_rm_vlan_all_mac(vsi, vid);
2880 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2882 /* schedule our worker thread which will take care of
2883 * applying the new filter changes
2885 i40e_service_event_schedule(vsi->back);
2889 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2890 * @netdev: network interface to be adjusted
2891 * @proto: unused protocol value
2892 * @vid: vlan id to be added
2894 * net_device_ops implementation for adding vlan ids
2896 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2897 __always_unused __be16 proto, u16 vid)
2899 struct i40e_netdev_priv *np = netdev_priv(netdev);
2900 struct i40e_vsi *vsi = np->vsi;
2903 if (vid >= VLAN_N_VID)
2906 ret = i40e_vsi_add_vlan(vsi, vid);
2908 set_bit(vid, vsi->active_vlans);
2914 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
2915 * @netdev: network interface to be adjusted
2916 * @proto: unused protocol value
2917 * @vid: vlan id to be added
2919 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2920 __always_unused __be16 proto, u16 vid)
2922 struct i40e_netdev_priv *np = netdev_priv(netdev);
2923 struct i40e_vsi *vsi = np->vsi;
2925 if (vid >= VLAN_N_VID)
2927 set_bit(vid, vsi->active_vlans);
2931 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2932 * @netdev: network interface to be adjusted
2933 * @proto: unused protocol value
2934 * @vid: vlan id to be removed
2936 * net_device_ops implementation for removing vlan ids
2938 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2939 __always_unused __be16 proto, u16 vid)
2941 struct i40e_netdev_priv *np = netdev_priv(netdev);
2942 struct i40e_vsi *vsi = np->vsi;
2944 /* return code is ignored as there is nothing a user
2945 * can do about failure to remove and a log message was
2946 * already printed from the other function
2948 i40e_vsi_kill_vlan(vsi, vid);
2950 clear_bit(vid, vsi->active_vlans);
2956 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2957 * @vsi: the vsi being brought back up
2959 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2966 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2967 i40e_vlan_stripping_enable(vsi);
2969 i40e_vlan_stripping_disable(vsi);
2971 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2972 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
2977 * i40e_vsi_add_pvid - Add pvid for the VSI
2978 * @vsi: the vsi being adjusted
2979 * @vid: the vlan id to set as a PVID
2981 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2983 struct i40e_vsi_context ctxt;
2986 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2987 vsi->info.pvid = cpu_to_le16(vid);
2988 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2989 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2990 I40E_AQ_VSI_PVLAN_EMOD_STR;
2992 ctxt.seid = vsi->seid;
2993 ctxt.info = vsi->info;
2994 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2996 dev_info(&vsi->back->pdev->dev,
2997 "add pvid failed, err %s aq_err %s\n",
2998 i40e_stat_str(&vsi->back->hw, ret),
2999 i40e_aq_str(&vsi->back->hw,
3000 vsi->back->hw.aq.asq_last_status));
3008 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
3009 * @vsi: the vsi being adjusted
3011 * Just use the vlan_rx_register() service to put it back to normal
3013 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
3017 i40e_vlan_stripping_disable(vsi);
3021 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
3022 * @vsi: ptr to the VSI
3024 * If this function returns with an error, then it's possible one or
3025 * more of the rings is populated (while the rest are not). It is the
3026 caller's duty to clean those orphaned rings.
3028 * Return 0 on success, negative on failure
3030 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3034 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3035 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3037 if (!i40e_enabled_xdp_vsi(vsi))
3040 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3041 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3047 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3048 * @vsi: ptr to the VSI
3050 * Free VSI's transmit software resources
3052 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3056 if (vsi->tx_rings) {
3057 for (i = 0; i < vsi->num_queue_pairs; i++)
3058 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3059 i40e_free_tx_resources(vsi->tx_rings[i]);
3062 if (vsi->xdp_rings) {
3063 for (i = 0; i < vsi->num_queue_pairs; i++)
3064 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3065 i40e_free_tx_resources(vsi->xdp_rings[i]);
3070 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3071 * @vsi: ptr to the VSI
3073 * If this function returns with an error, then it's possible one or
3074 * more of the rings is populated (while the rest are not). It is the
3075 caller's duty to clean those orphaned rings.
3077 * Return 0 on success, negative on failure
3079 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3083 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3084 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3089 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3090 * @vsi: ptr to the VSI
3092 * Free all receive software resources
3094 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3101 for (i = 0; i < vsi->num_queue_pairs; i++)
3102 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3103 i40e_free_rx_resources(vsi->rx_rings[i]);
3107 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3108 * @ring: The Tx ring to configure
3110 * This enables/disables XPS for a given Tx descriptor ring
3111 * based on the TCs enabled for the VSI that ring belongs to.
3113 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3117 if (!ring->q_vector || !ring->netdev || ring->ch)
3120 /* We only initialize XPS once, so as not to overwrite user settings */
3121 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3124 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3125 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3130 * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled
3131 * @ring: The Tx or Rx ring
3133 * Returns the AF_XDP buffer pool or NULL.
3135 static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
3137 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3138 int qid = ring->queue_index;
3140 if (ring_is_xdp(ring))
3141 qid -= ring->vsi->alloc_queue_pairs;
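/* XDP Tx rings are indexed after the regular queue pairs, so subtracting
 * alloc_queue_pairs recovers the queue id the AF_XDP socket was actually
 * bound to before looking up its buffer pool.
 */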
3143 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3146 return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
3150 * i40e_configure_tx_ring - Configure a transmit ring context
3151 * @ring: The Tx ring to configure
3153 * Configure the Tx descriptor ring in the HMC context.
3155 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3157 struct i40e_vsi *vsi = ring->vsi;
3158 u16 pf_q = vsi->base_queue + ring->queue_index;
3159 struct i40e_hw *hw = &vsi->back->hw;
3160 struct i40e_hmc_obj_txq tx_ctx;
3161 i40e_status err = 0;
3164 if (ring_is_xdp(ring))
3165 ring->xsk_pool = i40e_xsk_pool(ring);
3167 /* some ATR related tx ring init */
3168 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3169 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3170 ring->atr_count = 0;
3172 ring->atr_sample_rate = 0;
3176 i40e_config_xps_tx_ring(ring);
3178 /* clear the context structure first */
3179 memset(&tx_ctx, 0, sizeof(tx_ctx));
3181 tx_ctx.new_context = 1;
3182 tx_ctx.base = (ring->dma / 128);
3183 tx_ctx.qlen = ring->count;
3184 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3185 I40E_FLAG_FD_ATR_ENABLED));
3186 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3187 /* FDIR VSI tx ring can still use RS bit and writebacks */
3188 if (vsi->type != I40E_VSI_FDIR)
3189 tx_ctx.head_wb_ena = 1;
3190 tx_ctx.head_wb_addr = ring->dma +
3191 (ring->count * sizeof(struct i40e_tx_desc));
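/* The head writeback location sits immediately past the descriptor ring
 * (base + count descriptors); the FDIR VSI opts out above so it can keep
 * using the RS bit and descriptor writebacks.
 */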
3193 /* As part of VSI creation/update, FW allocates certain
3194 * Tx arbitration queue sets for each TC enabled for
3195 * the VSI. The FW returns the handles to these queue
3196 * sets as part of the response buffer to Add VSI,
3197 * Update VSI, etc. AQ commands. It is expected that
3198 * these queue set handles be associated with the Tx
3199 * queues by the driver as part of the TX queue context
3200 * initialization. This has to be done regardless of
3201 * DCB as by default everything is mapped to TC0.
3206 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3209 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3211 tx_ctx.rdylist_act = 0;
3213 /* clear the context in the HMC */
3214 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3216 dev_info(&vsi->back->pdev->dev,
3217 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3218 ring->queue_index, pf_q, err);
3222 /* set the context in the HMC */
3223 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3225 dev_info(&vsi->back->pdev->dev,
3226 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3227 ring->queue_index, pf_q, err);
3231 /* Now associate this queue with this PCI function */
3233 if (ring->ch->type == I40E_VSI_VMDQ2)
3234 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3238 qtx_ctl |= (ring->ch->vsi_number <<
3239 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3240 I40E_QTX_CTL_VFVM_INDX_MASK;
3242 if (vsi->type == I40E_VSI_VMDQ2) {
3243 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3244 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3245 I40E_QTX_CTL_VFVM_INDX_MASK;
3247 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3251 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3252 I40E_QTX_CTL_PF_INDX_MASK);
3253 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3256 /* cache the tail offset for easier writes later */
3257 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3263 * i40e_configure_rx_ring - Configure a receive ring context
3264 * @ring: The Rx ring to configure
3266 * Configure the Rx descriptor ring in the HMC context.
3268 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3270 struct i40e_vsi *vsi = ring->vsi;
3271 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3272 u16 pf_q = vsi->base_queue + ring->queue_index;
3273 struct i40e_hw *hw = &vsi->back->hw;
3274 struct i40e_hmc_obj_rxq rx_ctx;
3275 i40e_status err = 0;
3279 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3281 /* clear the context structure first */
3282 memset(&rx_ctx, 0, sizeof(rx_ctx));
3284 if (ring->vsi->type == I40E_VSI_MAIN)
3285 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3288 ring->xsk_pool = i40e_xsk_pool(ring);
3289 if (ring->xsk_pool) {
3290 ret = i40e_alloc_rx_bi_zc(ring);
3294 xsk_pool_get_rx_frame_size(ring->xsk_pool);
3295 /* For AF_XDP ZC, we disallow packets spanning
3296 * multiple buffers, which lets us skip that
3297 * handling in the fast path.
3300 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3301 MEM_TYPE_XSK_BUFF_POOL,
3305 dev_info(&vsi->back->pdev->dev,
3306 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
3310 ret = i40e_alloc_rx_bi(ring);
3313 ring->rx_buf_len = vsi->rx_buf_len;
3314 if (ring->vsi->type == I40E_VSI_MAIN) {
3315 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3316 MEM_TYPE_PAGE_SHARED,
3323 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3324 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3326 rx_ctx.base = (ring->dma / 128);
3327 rx_ctx.qlen = ring->count;
3329 /* use 16 byte descriptors */
3332 /* descriptor type is always zero
3335 rx_ctx.hsplit_0 = 0;
3337 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
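/* rxmax caps the receive frame size at the smaller of the VSI's max frame
 * and what buffer chaining can cover: chain_len buffers of rx_buf_len
 * bytes each.
 */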
3338 if (hw->revision_id == 0)
3339 rx_ctx.lrxqthresh = 0;
3341 rx_ctx.lrxqthresh = 1;
3342 rx_ctx.crcstrip = 1;
3344 /* this controls whether VLAN is stripped from inner headers */
3346 /* set the prefena field to 1 because the manual says to */
3349 /* clear the context in the HMC */
3350 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3352 dev_info(&vsi->back->pdev->dev,
3353 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3354 ring->queue_index, pf_q, err);
3358 /* set the context in the HMC */
3359 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3361 dev_info(&vsi->back->pdev->dev,
3362 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3363 ring->queue_index, pf_q, err);
3367 /* configure Rx buffer alignment */
3368 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3369 clear_ring_build_skb_enabled(ring);
3371 set_ring_build_skb_enabled(ring);
3373 /* cache tail for quicker writes, and clear the reg before use */
3374 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3375 writel(0, ring->tail);
3377 if (ring->xsk_pool) {
3378 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
3379 ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
3381 ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
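/* Note the asymmetry: the ZC allocator returns true on success, whereas
 * i40e_alloc_rx_buffers() returns true on failure, hence the negation so
 * that "ok" means the same thing on both paths.
 */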
3384 /* Log this in case the user has forgotten to give the kernel
3385 * any buffers, even later in the application.
3387 dev_info(&vsi->back->pdev->dev,
3388 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3389 ring->xsk_pool ? "AF_XDP ZC enabled " : "",
3390 ring->queue_index, pf_q);
3397 * i40e_vsi_configure_tx - Configure the VSI for Tx
3398 * @vsi: VSI structure describing this set of rings and resources
3400 * Configure the Tx VSI for operation.
3402 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3407 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3408 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3410 if (err || !i40e_enabled_xdp_vsi(vsi))
3413 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3414 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3420 * i40e_vsi_configure_rx - Configure the VSI for Rx
3421 * @vsi: the VSI being configured
3423 * Configure the Rx VSI for operation.
3425 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3430 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3431 vsi->max_frame = I40E_MAX_RXBUFFER;
3432 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3433 #if (PAGE_SIZE < 8192)
3434 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3435 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3436 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3437 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3440 vsi->max_frame = I40E_MAX_RXBUFFER;
3441 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3445 /* set up individual rings */
3446 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3447 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3453 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3454 * @vsi: ptr to the VSI
3456 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3458 struct i40e_ring *tx_ring, *rx_ring;
3459 u16 qoffset, qcount;
3462 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3463 /* Reset the TC information */
3464 for (i = 0; i < vsi->num_queue_pairs; i++) {
3465 rx_ring = vsi->rx_rings[i];
3466 tx_ring = vsi->tx_rings[i];
3467 rx_ring->dcb_tc = 0;
3468 tx_ring->dcb_tc = 0;
3473 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3474 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3477 qoffset = vsi->tc_config.tc_info[n].qoffset;
3478 qcount = vsi->tc_config.tc_info[n].qcount;
3479 for (i = qoffset; i < (qoffset + qcount); i++) {
3480 rx_ring = vsi->rx_rings[i];
3481 tx_ring = vsi->tx_rings[i];
3482 rx_ring->dcb_tc = n;
3483 tx_ring->dcb_tc = n;
3489 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3490 * @vsi: ptr to the VSI
3492 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3495 i40e_set_rx_mode(vsi->netdev);
3499 * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
3500 * @pf: Pointer to the targeted PF
3502 * Set all flow director counters to 0.
3504 static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf)
3506 pf->fd_tcp4_filter_cnt = 0;
3507 pf->fd_udp4_filter_cnt = 0;
3508 pf->fd_sctp4_filter_cnt = 0;
3509 pf->fd_ip4_filter_cnt = 0;
3510 pf->fd_tcp6_filter_cnt = 0;
3511 pf->fd_udp6_filter_cnt = 0;
3512 pf->fd_sctp6_filter_cnt = 0;
3513 pf->fd_ip6_filter_cnt = 0;
3517 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3518 * @vsi: Pointer to the targeted VSI
3520 * This function replays, on the hardware, the hlist in which all the SB
3521 * Flow Director filters were saved.
3523 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3525 struct i40e_fdir_filter *filter;
3526 struct i40e_pf *pf = vsi->back;
3527 struct hlist_node *node;
3529 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3532 /* Reset FDir counters as we're replaying all existing filters */
3533 i40e_reset_fdir_filter_cnt(pf);
3535 hlist_for_each_entry_safe(filter, node,
3536 &pf->fdir_filter_list, fdir_node) {
3537 i40e_add_del_fdir(vsi, filter, true);
3542 * i40e_vsi_configure - Set up the VSI for action
3543 * @vsi: the VSI being configured
3545 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3549 i40e_set_vsi_rx_mode(vsi);
3550 i40e_restore_vlan(vsi);
3551 i40e_vsi_config_dcb_rings(vsi);
3552 err = i40e_vsi_configure_tx(vsi);
3554 err = i40e_vsi_configure_rx(vsi);
3560 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3561 * @vsi: the VSI being configured
3563 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3565 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3566 struct i40e_pf *pf = vsi->back;
3567 struct i40e_hw *hw = &pf->hw;
3572 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3573 * and PFINT_LNKLSTn registers, e.g.:
3574 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3576 qp = vsi->base_queue;
3577 vector = vsi->base_vector;
3578 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3579 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3581 q_vector->rx.next_update = jiffies + 1;
3582 q_vector->rx.target_itr =
3583 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3584 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3585 q_vector->rx.target_itr >> 1);
3586 q_vector->rx.current_itr = q_vector->rx.target_itr;
3588 q_vector->tx.next_update = jiffies + 1;
3589 q_vector->tx.target_itr =
3590 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3591 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3592 q_vector->tx.target_itr >> 1);
3593 q_vector->tx.current_itr = q_vector->tx.target_itr;
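/* The >> 1 when writing target_itr reflects the hardware ITR granularity
 * of 2 usecs: the driver tracks intervals in usecs but the registers count
 * in 2-usec units.
 */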
3595 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3596 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3598 /* Linked list for the queuepairs assigned to this vector */
3599 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3600 for (q = 0; q < q_vector->num_ringpairs; q++) {
3601 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
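/* With XDP enabled, each queue pair owns a second Tx ring located
 * alloc_queue_pairs entries above it. The Rx cause is chained to that XDP
 * Tx queue first, and the XDP Tx cause then chains back to the regular Tx
 * queue (see the TQCTL writes below).
 */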
3604 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3605 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3606 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3607 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3608 (I40E_QUEUE_TYPE_TX <<
3609 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3611 wr32(hw, I40E_QINT_RQCTL(qp), val);
3614 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3615 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3616 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3617 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3618 (I40E_QUEUE_TYPE_TX <<
3619 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3621 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3624 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3625 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3626 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3627 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3628 (I40E_QUEUE_TYPE_RX <<
3629 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3631 /* Terminate the linked list */
3632 if (q == (q_vector->num_ringpairs - 1))
3633 val |= (I40E_QUEUE_END_OF_LIST <<
3634 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3636 wr32(hw, I40E_QINT_TQCTL(qp), val);
3645 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3646 * @pf: pointer to private device data structure
3648 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3650 struct i40e_hw *hw = &pf->hw;
3653 /* clear things first */
3654 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3655 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3657 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3658 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3659 I40E_PFINT_ICR0_ENA_GRST_MASK |
3660 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3661 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3662 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3663 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3664 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3666 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3667 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3669 if (pf->flags & I40E_FLAG_PTP)
3670 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3672 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3674 /* SW_ITR_IDX = 0, but don't change INTENA */
3675 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3676 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3678 /* OTHER_ITR_IDX = 0 */
3679 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3683 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3684 * @vsi: the VSI being configured
3686 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3688 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3689 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3690 struct i40e_pf *pf = vsi->back;
3691 struct i40e_hw *hw = &pf->hw;
3694 /* set the ITR configuration */
3695 q_vector->rx.next_update = jiffies + 1;
3696 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3697 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
3698 q_vector->rx.current_itr = q_vector->rx.target_itr;
3699 q_vector->tx.next_update = jiffies + 1;
3700 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3701 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
3702 q_vector->tx.current_itr = q_vector->tx.target_itr;
3704 i40e_enable_misc_int_causes(pf);
3706 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3707 wr32(hw, I40E_PFINT_LNKLST0, 0);
3709 /* Associate the queue pair to the vector and enable the queue int */
3710 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3711 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3712 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3713 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3715 wr32(hw, I40E_QINT_RQCTL(0), val);
3717 if (i40e_enabled_xdp_vsi(vsi)) {
3718 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3719 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3721 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3723 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3726 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3727 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3728 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3730 wr32(hw, I40E_QINT_TQCTL(0), val);
3735 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3736 * @pf: board private structure
3738 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3740 struct i40e_hw *hw = &pf->hw;
3742 wr32(hw, I40E_PFINT_DYN_CTL0,
3743 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3748 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3749 * @pf: board private structure
3751 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3753 struct i40e_hw *hw = &pf->hw;
3756 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3757 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3758 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3760 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3765 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3766 * @irq: interrupt number
3767 * @data: pointer to a q_vector
3769 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3771 struct i40e_q_vector *q_vector = data;
3773 if (!q_vector->tx.ring && !q_vector->rx.ring)
3776 napi_schedule_irqoff(&q_vector->napi);
3782 * i40e_irq_affinity_notify - Callback for affinity changes
3783 * @notify: context as to what irq was changed
3784 * @mask: the new affinity mask
3786 * This is a callback function used by the irq_set_affinity_notifier function
3787 * so that we may register to receive changes to the irq affinity masks.
3789 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3790 const cpumask_t *mask)
3792 struct i40e_q_vector *q_vector =
3793 container_of(notify, struct i40e_q_vector, affinity_notify);
3795 cpumask_copy(&q_vector->affinity_mask, mask);
3799 * i40e_irq_affinity_release - Callback for affinity notifier release
3800 * @ref: internal core kernel usage
3802 * This is a callback function used by the irq_set_affinity_notifier function
3803 * to inform the current notification subscriber that they will no longer
3804 * receive notifications.
3806 static void i40e_irq_affinity_release(struct kref *ref) {}
3809 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3810 * @vsi: the VSI being configured
3811 * @basename: name for the vector
3813 * Allocates MSI-X vectors and requests interrupts from the kernel.
3815 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3817 int q_vectors = vsi->num_q_vectors;
3818 struct i40e_pf *pf = vsi->back;
3819 int base = vsi->base_vector;
3826 for (vector = 0; vector < q_vectors; vector++) {
3827 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3829 irq_num = pf->msix_entries[base + vector].vector;
3831 if (q_vector->tx.ring && q_vector->rx.ring) {
3832 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3833 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3835 } else if (q_vector->rx.ring) {
3836 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3837 "%s-%s-%d", basename, "rx", rx_int_idx++);
3838 } else if (q_vector->tx.ring) {
3839 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3840 "%s-%s-%d", basename, "tx", tx_int_idx++);
3842 /* skip this unused q_vector */
3845 err = request_irq(irq_num,
3851 dev_info(&pf->pdev->dev,
3852 "MSIX request_irq failed, error: %d\n", err);
3853 goto free_queue_irqs;
3856 /* register for affinity change notifications */
3857 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3858 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3859 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3860 /* Spread affinity hints out across online CPUs.
3862 * get_cpu_mask returns a static constant mask with
3863 * a permanent lifetime so it's ok to pass to
3864 * irq_set_affinity_hint without making a copy.
3866 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3867 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3870 vsi->irqs_ready = true;
3876 irq_num = pf->msix_entries[base + vector].vector;
3877 irq_set_affinity_notifier(irq_num, NULL);
3878 irq_set_affinity_hint(irq_num, NULL);
3879 free_irq(irq_num, &vsi->q_vectors[vector]);
3885 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3886 * @vsi: the VSI being un-configured
3888 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3890 struct i40e_pf *pf = vsi->back;
3891 struct i40e_hw *hw = &pf->hw;
3892 int base = vsi->base_vector;
3895 /* disable interrupt causation from each queue */
3896 for (i = 0; i < vsi->num_queue_pairs; i++) {
3899 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3900 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3901 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3903 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3904 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3905 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3907 if (!i40e_enabled_xdp_vsi(vsi))
3909 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3912 /* disable each interrupt */
3913 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3914 for (i = vsi->base_vector;
3915 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3916 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3919 for (i = 0; i < vsi->num_q_vectors; i++)
3920 synchronize_irq(pf->msix_entries[i + base].vector);
3922 /* Legacy and MSI mode - this stops all interrupt handling */
3923 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3924 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3926 synchronize_irq(pf->pdev->irq);
3931 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3932 * @vsi: the VSI being configured
3934 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3936 struct i40e_pf *pf = vsi->back;
3939 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3940 for (i = 0; i < vsi->num_q_vectors; i++)
3941 i40e_irq_dynamic_enable(vsi, i);
3943 i40e_irq_dynamic_enable_icr0(pf);
3946 i40e_flush(&pf->hw);
3951 * i40e_free_misc_vector - Free the vector that handles non-queue events
3952 * @pf: board private structure
3954 static void i40e_free_misc_vector(struct i40e_pf *pf)
3957 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3958 i40e_flush(&pf->hw);
3960 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3961 synchronize_irq(pf->msix_entries[0].vector);
3962 free_irq(pf->msix_entries[0].vector, pf);
3963 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3968 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3969 * @irq: interrupt number
3970 * @data: pointer to a q_vector
3972 * This is the handler used for all MSI/Legacy interrupts, and deals
3973 * with both queue and non-queue interrupts. This is also used in
3974 * MSIX mode to handle the non-queue interrupts.
3976 static irqreturn_t i40e_intr(int irq, void *data)
3978 struct i40e_pf *pf = (struct i40e_pf *)data;
3979 struct i40e_hw *hw = &pf->hw;
3980 irqreturn_t ret = IRQ_NONE;
3981 u32 icr0, icr0_remaining;
3984 icr0 = rd32(hw, I40E_PFINT_ICR0);
3985 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3987 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3988 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3991 /* if interrupt but no bits showing, must be SWINT */
3992 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3993 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3996 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3997 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3998 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3999 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
4000 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
4003 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
4004 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
4005 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
4006 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
4008 /* We do not have a way to disarm queue causes while leaving
4009 * the interrupt enabled for all other causes. Ideally the
4010 * interrupt should be disabled while we are in NAPI, but
4011 * this is not a performance path and napi_schedule()
4012 * can deal with rescheduling.
4014 if (!test_bit(__I40E_DOWN, pf->state))
4015 napi_schedule_irqoff(&q_vector->napi);
4018 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4019 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4020 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
4021 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
4024 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
4025 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4026 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
4029 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4030 /* disable any further VFLR event notifications */
4031 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
4032 u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4034 reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
4035 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4037 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
4038 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4042 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
4043 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4044 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
4045 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
4046 val = rd32(hw, I40E_GLGEN_RSTAT);
4047 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
4048 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
4049 if (val == I40E_RESET_CORER) {
4051 } else if (val == I40E_RESET_GLOBR) {
4053 } else if (val == I40E_RESET_EMPR) {
4055 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4059 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4060 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4061 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4062 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4063 rd32(hw, I40E_PFHMC_ERRORINFO),
4064 rd32(hw, I40E_PFHMC_ERRORDATA));
4067 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4068 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4070 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
4071 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4072 i40e_ptp_tx_hwtstamp(pf);
4076 /* If a critical error is pending we have no choice but to reset the
4077 * device.
4078 * Report and mask out any remaining unexpected interrupts.
4080 icr0_remaining = icr0 & ena_mask;
4081 if (icr0_remaining) {
4082 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4084 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4085 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4086 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4087 dev_info(&pf->pdev->dev, "device will be reset\n");
4088 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4089 i40e_service_event_schedule(pf);
4091 ena_mask &= ~icr0_remaining;
4096 /* re-enable interrupt causes */
4097 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4098 if (!test_bit(__I40E_DOWN, pf->state) ||
4099 test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4100 i40e_service_event_schedule(pf);
4101 i40e_irq_dynamic_enable_icr0(pf);
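/* Bookkeeping note: every cause handled above clears its bit from
 * ena_mask, so icr0_remaining = icr0 & ena_mask is exactly the set of
 * causes nobody claimed; clearing those bits from ena_mask before the
 * final write keeps an unexpected cause from firing again until it is
 * explicitly re-enabled.
 */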
4108 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4109 * @tx_ring: tx ring to clean
4110 * @budget: how many cleans we're allowed
4112 * Returns true if there's any budget left (i.e. the clean is finished)
4114 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4116 struct i40e_vsi *vsi = tx_ring->vsi;
4117 u16 i = tx_ring->next_to_clean;
4118 struct i40e_tx_buffer *tx_buf;
4119 struct i40e_tx_desc *tx_desc;
4121 tx_buf = &tx_ring->tx_bi[i];
4122 tx_desc = I40E_TX_DESC(tx_ring, i);
4123 i -= tx_ring->count;
4126 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4128 /* if next_to_watch is not set then there is no work pending */
4132 /* prevent any other reads prior to eop_desc */
4135 /* if the descriptor isn't done, no work yet to do */
4136 if (!(eop_desc->cmd_type_offset_bsz &
4137 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4140 /* clear next_to_watch to prevent false hangs */
4141 tx_buf->next_to_watch = NULL;
4143 tx_desc->buffer_addr = 0;
4144 tx_desc->cmd_type_offset_bsz = 0;
4145 /* move past filter desc */
4150 i -= tx_ring->count;
4151 tx_buf = tx_ring->tx_bi;
4152 tx_desc = I40E_TX_DESC(tx_ring, 0);
4154 /* unmap skb header data */
4155 dma_unmap_single(tx_ring->dev,
4156 dma_unmap_addr(tx_buf, dma),
4157 dma_unmap_len(tx_buf, len),
4159 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4160 kfree(tx_buf->raw_buf);
4162 tx_buf->raw_buf = NULL;
4163 tx_buf->tx_flags = 0;
4164 tx_buf->next_to_watch = NULL;
4165 dma_unmap_len_set(tx_buf, len, 0);
4166 tx_desc->buffer_addr = 0;
4167 tx_desc->cmd_type_offset_bsz = 0;
4169 /* move us past the eop_desc for start of next FD desc */
4174 i -= tx_ring->count;
4175 tx_buf = tx_ring->tx_bi;
4176 tx_desc = I40E_TX_DESC(tx_ring, 0);
4179 /* update budget accounting */
4181 } while (likely(budget));
4183 i += tx_ring->count;
4184 tx_ring->next_to_clean = i;
4186 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4187 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
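/* Index bookkeeping sketch for the loop above: 'i' is biased by
 * -tx_ring->count, so the ring wrap is detected when the per-descriptor
 * increment drives it to zero and tx_buf/tx_desc are reset to the ring
 * base; the final 'i += tx_ring->count' converts the biased value back
 * into a real next_to_clean index.
 */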
4193 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4194 * @irq: interrupt number
4195 * @data: pointer to a q_vector
4197 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4199 struct i40e_q_vector *q_vector = data;
4200 struct i40e_vsi *vsi;
4202 if (!q_vector->tx.ring)
4203 return IRQ_HANDLED;
4205 vsi = q_vector->tx.ring->vsi;
4206 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4212 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4213 * @vsi: the VSI being configured
4214 * @v_idx: vector index
4215 * @qp_idx: queue pair index
4217 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4219 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4220 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4221 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4223 tx_ring->q_vector = q_vector;
4224 tx_ring->next = q_vector->tx.ring;
4225 q_vector->tx.ring = tx_ring;
4226 q_vector->tx.count++;
4228 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4229 if (i40e_enabled_xdp_vsi(vsi)) {
4230 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4232 xdp_ring->q_vector = q_vector;
4233 xdp_ring->next = q_vector->tx.ring;
4234 q_vector->tx.ring = xdp_ring;
4235 q_vector->tx.count++;
4238 rx_ring->q_vector = q_vector;
4239 rx_ring->next = q_vector->rx.ring;
4240 q_vector->rx.ring = rx_ring;
4241 q_vector->rx.count++;
4245 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4246 * @vsi: the VSI being configured
4248 * This function maps descriptor rings to the queue-specific vectors
4249 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4250 * one vector per queue pair, but on a constrained vector budget, we
4251 * group the queue pairs as "efficiently" as possible.
4253 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4255 int qp_remaining = vsi->num_queue_pairs;
4256 int q_vectors = vsi->num_q_vectors;
4261 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4262 * group them so there are multiple queues per vector.
4263 * It is also important to go through all the vectors available so
4264 * that, if we don't use them all, the remaining vectors
4265 * are cleared. This is especially important when decreasing the
4266 * number of queues in use.
4268 for (; v_start < q_vectors; v_start++) {
4269 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4271 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
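/* e.g. 10 queue pairs on 4 vectors: as qp_remaining shrinks,
 * DIV_ROUND_UP yields 3, 3, 2, 2, so no vector carries more than one
 * extra pair.
 */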
4273 q_vector->num_ringpairs = num_ringpairs;
4274 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4276 q_vector->rx.count = 0;
4277 q_vector->tx.count = 0;
4278 q_vector->rx.ring = NULL;
4279 q_vector->tx.ring = NULL;
4281 while (num_ringpairs--) {
4282 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4290 * i40e_vsi_request_irq - Request IRQ from the OS
4291 * @vsi: the VSI being configured
4292 * @basename: name for the vector
4294 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4296 struct i40e_pf *pf = vsi->back;
4299 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4300 err = i40e_vsi_request_irq_msix(vsi, basename);
4301 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4302 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4305 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4309 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4314 #ifdef CONFIG_NET_POLL_CONTROLLER
4316 * i40e_netpoll - A Polling 'interrupt' handler
4317 * @netdev: network interface device structure
4319 * This is used by netconsole to send skbs without having to re-enable
4320 * interrupts. It's not called while the normal interrupt routine is executing.
4322 static void i40e_netpoll(struct net_device *netdev)
4324 struct i40e_netdev_priv *np = netdev_priv(netdev);
4325 struct i40e_vsi *vsi = np->vsi;
4326 struct i40e_pf *pf = vsi->back;
4329 /* if interface is down do nothing */
4330 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4333 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4334 for (i = 0; i < vsi->num_q_vectors; i++)
4335 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4337 i40e_intr(pf->pdev->irq, netdev);
4342 #define I40E_QTX_ENA_WAIT_COUNT 50
4345 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4346 * @pf: the PF being configured
4347 * @pf_q: the PF queue
4348 * @enable: enable or disable state of the queue
4350 * This routine will wait for the given Tx queue of the PF to reach the
4351 * enabled or disabled state.
4352 * Returns -ETIMEDOUT if the queue fails to reach the requested state after
4353 * multiple retries; otherwise returns 0 on success.
4355 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4360 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4361 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
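/* The '!!' below collapses the masked QENA_STAT bits to 0 or 1 so the
 * result compares cleanly against the bool 'enable'.
 */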
4362 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4365 usleep_range(10, 20);
4367 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4368 return -ETIMEDOUT;
4374 * i40e_control_tx_q - Start or stop a particular Tx queue
4375 * @pf: the PF structure
4376 * @pf_q: the PF queue to configure
4377 * @enable: start or stop the queue
4379 * This function enables or disables a single queue. Note that any delay
4380 * required after the operation is expected to be handled by the caller of
4381 * this function.
4383 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4385 struct i40e_hw *hw = &pf->hw;
4389 /* warn the TX unit of coming changes */
4390 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4392 usleep_range(10, 20);
4394 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4395 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4396 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4397 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4399 usleep_range(1000, 2000);
4402 /* Skip if the queue is already in the requested state */
4403 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4406 /* turn on/off the queue */
4408 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4409 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4411 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4414 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
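/* Writing QENA_REQ requests the state change; hardware acknowledges it
 * by updating QENA_STAT, which i40e_pf_txq_wait() polls on behalf of
 * callers such as i40e_control_wait_tx_q().
 */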
4418 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4419 * @seid: VSI SEID
4420 * @pf: the PF structure
4421 * @pf_q: the PF queue to configure
4422 * @is_xdp: true if the queue is used for XDP
4423 * @enable: start or stop the queue
4425 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4426 bool is_xdp, bool enable)
4430 i40e_control_tx_q(pf, pf_q, enable);
4432 /* wait for the change to finish */
4433 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4435 dev_info(&pf->pdev->dev,
4436 "VSI seid %d %sTx ring %d %sable timeout\n",
4437 seid, (is_xdp ? "XDP " : ""), pf_q,
4438 (enable ? "en" : "dis"));
4445 * i40e_vsi_control_tx - Start or stop a VSI's rings
4446 * @vsi: the VSI being configured
4447 * @enable: start or stop the rings
4449 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4451 struct i40e_pf *pf = vsi->back;
4452 int i, pf_q, ret = 0;
4454 pf_q = vsi->base_queue;
4455 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4456 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4458 false /*is xdp*/, enable);
4462 if (!i40e_enabled_xdp_vsi(vsi))
4465 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4466 pf_q + vsi->alloc_queue_pairs,
4467 true /*is xdp*/, enable);
4475 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4476 * @pf: the PF being configured
4477 * @pf_q: the PF queue
4478 * @enable: enable or disable state of the queue
4480 * This routine will wait for the given Rx queue of the PF to reach the
4481 * enabled or disabled state.
4482 * Returns -ETIMEDOUT if the queue fails to reach the requested state after
4483 * multiple retries; otherwise returns 0 on success.
4485 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4490 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4491 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4492 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4495 usleep_range(10, 20);
4497 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4498 return -ETIMEDOUT;
4504 * i40e_control_rx_q - Start or stop a particular Rx queue
4505 * @pf: the PF structure
4506 * @pf_q: the PF queue to configure
4507 * @enable: start or stop the queue
4509 * This function enables or disables a single queue. Note that
4510 * any delay required after the operation is expected to be
4511 * handled by the caller of this function.
4513 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4515 struct i40e_hw *hw = &pf->hw;
4519 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4520 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4521 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4522 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4524 usleep_range(1000, 2000);
4527 /* Skip if the queue is already in the requested state */
4528 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4531 /* turn on/off the queue */
4533 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4535 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4537 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4541 * i40e_control_wait_rx_q - Start/stop Rx queue and wait for completion
4542 * @pf: the PF structure
4543 * @pf_q: queue being configured
4544 * @enable: start or stop the queue
4546 * This function enables or disables a single queue along with waiting
4547 * for the change to finish. The caller of this function should handle
4548 * the delays needed in the case of disabling queues.
4550 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4554 i40e_control_rx_q(pf, pf_q, enable);
4556 /* wait for the change to finish */
4557 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4565 * i40e_vsi_control_rx - Start or stop a VSI's rings
4566 * @vsi: the VSI being configured
4567 * @enable: start or stop the rings
4569 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4571 struct i40e_pf *pf = vsi->back;
4572 int i, pf_q, ret = 0;
4574 pf_q = vsi->base_queue;
4575 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4576 ret = i40e_control_wait_rx_q(pf, pf_q, enable);
4578 dev_info(&pf->pdev->dev,
4579 "VSI seid %d Rx ring %d %sable timeout\n",
4580 vsi->seid, pf_q, (enable ? "en" : "dis"));
4585 /* Due to HW errata, on Rx disable only, the register can indicate done
4586 * before it really is. Needs 50ms to be sure.
4587 */
4588 if (!enable)
4589 mdelay(50);
4595 * i40e_vsi_start_rings - Start a VSI's rings
4596 * @vsi: the VSI being configured
4598 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4602 /* do rx first for enable and last for disable */
4603 ret = i40e_vsi_control_rx(vsi, true);
4606 ret = i40e_vsi_control_tx(vsi, true);
4612 * i40e_vsi_stop_rings - Stop a VSI's rings
4613 * @vsi: the VSI being configured
4615 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4617 /* When port TX is suspended, don't wait */
4618 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4619 return i40e_vsi_stop_rings_no_wait(vsi);
4621 /* do Rx first for enable and last for disable.
4622 * Ignore the return values; we need to shut down whatever we can.
4623 */
4624 i40e_vsi_control_tx(vsi, false);
4625 i40e_vsi_control_rx(vsi, false);
4629 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4630 * @vsi: the VSI being shutdown
4632 * This function stops all the rings for a VSI but does not delay to verify
4633 * that rings have been disabled. It is expected that the caller is shutting
4634 * down multiple VSIs at once and will delay together for all the VSIs after
4635 * initiating the shutdown. This is particularly useful for shutting down lots
4636 * of VFs together. Otherwise, a large delay can be incurred while configuring
4637 * each VSI serially.
4639 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4641 struct i40e_pf *pf = vsi->back;
4644 pf_q = vsi->base_queue;
4645 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4646 i40e_control_tx_q(pf, pf_q, false);
4647 i40e_control_rx_q(pf, pf_q, false);
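/* Usage sketch (hypothetical caller): stop the rings of many VSIs
 * back to back with i40e_vsi_stop_rings_no_wait(), then perform one
 * shared settling delay before continuing, rather than paying the
 * delay once per VSI.
 */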
4652 * i40e_vsi_free_irq - Free the irq association with the OS
4653 * @vsi: the VSI being configured
4655 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4657 struct i40e_pf *pf = vsi->back;
4658 struct i40e_hw *hw = &pf->hw;
4659 int base = vsi->base_vector;
4663 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4664 if (!vsi->q_vectors)
4667 if (!vsi->irqs_ready)
4670 vsi->irqs_ready = false;
4671 for (i = 0; i < vsi->num_q_vectors; i++) {
4672 int irq_num;
4673 u16 vector = i + base;
4676 irq_num = pf->msix_entries[vector].vector;
4678 /* free only the irqs that were actually requested */
4679 if (!vsi->q_vectors[i] ||
4680 !vsi->q_vectors[i]->num_ringpairs)
4683 /* clear the affinity notifier in the IRQ descriptor */
4684 irq_set_affinity_notifier(irq_num, NULL);
4685 /* remove our suggested affinity mask for this IRQ */
4686 irq_set_affinity_hint(irq_num, NULL);
4687 synchronize_irq(irq_num);
4688 free_irq(irq_num, vsi->q_vectors[i]);
4690 /* Tear down the interrupt queue link list
4692 * We know that they come in pairs and always
4693 * the Rx first, then the Tx. To clear the
4694 * link list, stick the EOL value into the
4695 * next_q field of the registers.
4697 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4698 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4699 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4700 val |= I40E_QUEUE_END_OF_LIST
4701 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4702 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4704 while (qp != I40E_QUEUE_END_OF_LIST) {
4705 u32 next;
4707 val = rd32(hw, I40E_QINT_RQCTL(qp));
4709 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4710 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4711 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4712 I40E_QINT_RQCTL_INTEVENT_MASK);
4714 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4715 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4717 wr32(hw, I40E_QINT_RQCTL(qp), val);
4719 val = rd32(hw, I40E_QINT_TQCTL(qp));
4721 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4722 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4724 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4725 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4726 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4727 I40E_QINT_TQCTL_INTEVENT_MASK);
4729 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4730 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4732 wr32(hw, I40E_QINT_TQCTL(qp), val);
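/* The walk above follows the vector's queue list in hardware: each
 * TQCTL NEXTQ_INDX field names the next queue, and the loop ends once
 * the I40E_QUEUE_END_OF_LIST sentinel is read.
 */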
4737 free_irq(pf->pdev->irq, pf);
4739 val = rd32(hw, I40E_PFINT_LNKLST0);
4740 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4741 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4742 val |= I40E_QUEUE_END_OF_LIST
4743 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4744 wr32(hw, I40E_PFINT_LNKLST0, val);
4746 val = rd32(hw, I40E_QINT_RQCTL(qp));
4747 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4748 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4749 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4750 I40E_QINT_RQCTL_INTEVENT_MASK);
4752 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4753 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4755 wr32(hw, I40E_QINT_RQCTL(qp), val);
4757 val = rd32(hw, I40E_QINT_TQCTL(qp));
4759 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4760 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4761 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4762 I40E_QINT_TQCTL_INTEVENT_MASK);
4764 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4765 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4767 wr32(hw, I40E_QINT_TQCTL(qp), val);
4772 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4773 * @vsi: the VSI being configured
4774 * @v_idx: Index of vector to be freed
4776 * This function frees the memory allocated to the q_vector. In addition if
4777 * NAPI is enabled it will delete any references to the NAPI struct prior
4778 * to freeing the q_vector.
4780 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4782 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4783 struct i40e_ring *ring;
4788 /* disassociate q_vector from rings */
4789 i40e_for_each_ring(ring, q_vector->tx)
4790 ring->q_vector = NULL;
4792 i40e_for_each_ring(ring, q_vector->rx)
4793 ring->q_vector = NULL;
4795 /* only VSI w/ an associated netdev is set up w/ NAPI */
4797 netif_napi_del(&q_vector->napi);
4799 vsi->q_vectors[v_idx] = NULL;
4801 kfree_rcu(q_vector, rcu);
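/* kfree_rcu() defers the actual free until an RCU grace period has
 * elapsed, so a concurrent reader that obtained the q_vector pointer
 * under rcu_read_lock() can never dereference freed memory.
 */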
4805 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4806 * @vsi: the VSI being un-configured
4808 * This frees the memory allocated to the q_vectors and
4809 * deletes references to the NAPI struct.
4811 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4815 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4816 i40e_free_q_vector(vsi, v_idx);
4820 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4821 * @pf: board private structure
4823 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4825 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4826 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4827 pci_disable_msix(pf->pdev);
4828 kfree(pf->msix_entries);
4829 pf->msix_entries = NULL;
4830 kfree(pf->irq_pile);
4831 pf->irq_pile = NULL;
4832 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4833 pci_disable_msi(pf->pdev);
4835 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4839 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4840 * @pf: board private structure
4842 * We go through and clear interrupt-specific resources and reset the
4843 * structure to pre-load conditions.
4845 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4849 i40e_free_misc_vector(pf);
4851 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4852 I40E_IWARP_IRQ_PILE_ID);
4854 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4855 for (i = 0; i < pf->num_alloc_vsi; i++)
4857 i40e_vsi_free_q_vectors(pf->vsi[i]);
4858 i40e_reset_interrupt_capability(pf);
4862 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4863 * @vsi: the VSI being configured
4865 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4872 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4873 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4875 if (q_vector->rx.ring || q_vector->tx.ring)
4876 napi_enable(&q_vector->napi);
4881 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4882 * @vsi: the VSI being configured
4884 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4891 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4892 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4894 if (q_vector->rx.ring || q_vector->tx.ring)
4895 napi_disable(&q_vector->napi);
4900 * i40e_vsi_close - Shut down a VSI
4901 * @vsi: the vsi to be quelled
4903 static void i40e_vsi_close(struct i40e_vsi *vsi)
4905 struct i40e_pf *pf = vsi->back;
4906 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4907 i40e_down(vsi);
4908 i40e_vsi_free_irq(vsi);
4909 i40e_vsi_free_tx_resources(vsi);
4910 i40e_vsi_free_rx_resources(vsi);
4911 vsi->current_netdev_flags = 0;
4912 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
4913 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4914 set_bit(__I40E_CLIENT_RESET, pf->state);
4918 * i40e_quiesce_vsi - Pause a given VSI
4919 * @vsi: the VSI being paused
4921 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4923 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4926 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
4927 if (vsi->netdev && netif_running(vsi->netdev))
4928 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4930 i40e_vsi_close(vsi);
4934 * i40e_unquiesce_vsi - Resume a given VSI
4935 * @vsi: the VSI being resumed
4937 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4939 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4942 if (vsi->netdev && netif_running(vsi->netdev))
4943 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4945 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4949 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4952 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4956 for (v = 0; v < pf->num_alloc_vsi; v++) {
4958 i40e_quiesce_vsi(pf->vsi[v]);
4963 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4966 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4970 for (v = 0; v < pf->num_alloc_vsi; v++) {
4972 i40e_unquiesce_vsi(pf->vsi[v]);
4977 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4978 * @vsi: the VSI being configured
4980 * Wait until all queues on a given VSI have been disabled.
4982 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4984 struct i40e_pf *pf = vsi->back;
4987 pf_q = vsi->base_queue;
4988 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4989 /* Check and wait for the Tx queue */
4990 ret = i40e_pf_txq_wait(pf, pf_q, false);
4992 dev_info(&pf->pdev->dev,
4993 "VSI seid %d Tx ring %d disable timeout\n",
4998 if (!i40e_enabled_xdp_vsi(vsi))
5001 /* Check and wait for the XDP Tx queue */
5002 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
5005 dev_info(&pf->pdev->dev,
5006 "VSI seid %d XDP Tx ring %d disable timeout\n",
5011 /* Check and wait for the Rx queue */
5012 ret = i40e_pf_rxq_wait(pf, pf_q, false);
5014 dev_info(&pf->pdev->dev,
5015 "VSI seid %d Rx ring %d disable timeout\n",
5024 #ifdef CONFIG_I40E_DCB
5026 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
5029 * This function waits for the queues of all the VSIs managed by this PF
5030 * to reach the disabled state.
5032 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
5036 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
5038 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
5050 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
5051 * @pf: pointer to PF
5053 * Get the TC map for an iSCSI PF type; the map will include the iSCSI TC
5056 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
5058 struct i40e_dcb_app_priority_table app;
5059 struct i40e_hw *hw = &pf->hw;
5060 u8 enabled_tc = 1; /* TC0 is always enabled */
5061 u8 tc, i;
5062 /* Get the iSCSI APP TLV */
5063 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5065 for (i = 0; i < dcbcfg->numapps; i++) {
5066 app = dcbcfg->app[i];
5067 if (app.selector == I40E_APP_SEL_TCPIP &&
5068 app.protocolid == I40E_APP_PROTOID_ISCSI) {
5069 tc = dcbcfg->etscfg.prioritytable[app.priority];
5070 enabled_tc |= BIT(tc);
5079 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
5080 * @dcbcfg: the corresponding DCBx configuration structure
5082 * Return the number of TCs from given DCBx configuration
5084 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5086 int i, tc_unused = 0;
5087 u8 num_tc = 0;
5090 /* Scan the ETS Config Priority Table to find
5091 * traffic class enabled for a given priority
5092 * and create a bitmask of enabled TCs
5094 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5095 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
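/* e.g. a priority table of {0, 1, 1, 0, 0, 0, 0, 0} sets only bits 0
 * and 1, so num_tc becomes 0b11 and two contiguous TCs are in use.
 */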
5097 /* Now scan the bitmask to check for
5098 * contiguous TCs starting with TC0
5100 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5101 if (num_tc & BIT(i)) {
5105 pr_err("Non-contiguous TC - Disabling DCB\n");
5113 /* There is always at least TC0 */
5121 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5122 * @dcbcfg: the corresponding DCBx configuration structure
5124 * Query the current DCB configuration and return a bitmap of the
5125 * traffic classes enabled, based on the given DCBX config
5127 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5129 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5133 for (i = 0; i < num_tc; i++)
5134 enabled_tc |= BIT(i);
5140 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5141 * @pf: PF being queried
5143 * Query the current MQPRIO configuration and return a bitmap of the
5144 * traffic classes enabled.
5146 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5148 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5149 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5150 u8 enabled_tc = 1, i;
5152 for (i = 1; i < num_tc; i++)
5153 enabled_tc |= BIT(i);
5158 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5159 * @pf: PF being queried
5161 * Return number of traffic classes enabled for the given PF
5163 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5165 struct i40e_hw *hw = &pf->hw;
5166 u8 i, enabled_tc = 1;
5167 u8 num_tc = 0;
5168 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5170 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5171 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5173 /* If neither MQPRIO nor DCB is enabled, then always use single TC */
5174 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5177 /* SFP mode will be enabled for all TCs on port */
5178 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5179 return i40e_dcb_get_num_tc(dcbcfg);
5181 /* In MFP mode, return the count of TCs enabled for this PF */
5182 if (pf->hw.func_caps.iscsi)
5183 enabled_tc = i40e_get_iscsi_tc_map(pf);
5185 return 1; /* Only TC0 */
5187 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5188 if (enabled_tc & BIT(i))
5189 num_tc++;
5195 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5196 * @pf: PF being queried
5198 * Return a bitmap for enabled traffic classes for this PF.
5200 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5202 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5203 return i40e_mqprio_get_enabled_tc(pf);
5205 /* If neither MQPRIO nor DCB is enabled for this PF then just return
5206 * the default TC.
5207 */
5208 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5209 return I40E_DEFAULT_TRAFFIC_CLASS;
5211 /* In SFP mode we want the PF to be enabled for all TCs */
5212 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5213 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5215 /* MFP enabled and iSCSI PF type */
5216 if (pf->hw.func_caps.iscsi)
5217 return i40e_get_iscsi_tc_map(pf);
5219 return I40E_DEFAULT_TRAFFIC_CLASS;
5223 * i40e_vsi_get_bw_info - Query VSI BW Information
5224 * @vsi: the VSI being queried
5226 * Returns 0 on success, negative value on failure
5228 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5230 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5231 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5232 struct i40e_pf *pf = vsi->back;
5233 struct i40e_hw *hw = &pf->hw;
5234 u32 tc_bw_max;
5235 int i, ret;
5238 /* Get the VSI level BW configuration */
5239 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5241 dev_info(&pf->pdev->dev,
5242 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5243 i40e_stat_str(&pf->hw, ret),
5244 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5248 /* Get the VSI level BW configuration per TC */
5249 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5252 dev_info(&pf->pdev->dev,
5253 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5254 i40e_stat_str(&pf->hw, ret),
5255 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5259 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5260 dev_info(&pf->pdev->dev,
5261 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5262 bw_config.tc_valid_bits,
5263 bw_ets_config.tc_valid_bits);
5264 /* Still continuing */
5267 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5268 vsi->bw_max_quanta = bw_config.max_bw;
5269 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5270 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5271 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5272 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5273 vsi->bw_ets_limit_credits[i] =
5274 le16_to_cpu(bw_ets_config.credits[i]);
5275 /* 3 bits out of 4 for each TC */
5276 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
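/* e.g. for i == 1 the shift is 4, so (tc_bw_max >> 4) & 0x7 isolates
 * the low 3 bits of TC1's 4-bit quanta field.
 */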
5283 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5284 * @vsi: the VSI being configured
5285 * @enabled_tc: TC bitmap
5286 * @bw_share: BW shared credits per TC
5288 * Returns 0 on success, negative value on failure
5290 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5291 u8 *bw_share)
5293 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5294 struct i40e_pf *pf = vsi->back;
5295 int ret, i;
5298 /* There is no need to reset BW when mqprio mode is on. */
5299 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5300 return 0;
5301 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5302 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5304 dev_info(&pf->pdev->dev,
5305 "Failed to reset tx rate for vsi->seid %u\n",
5309 memset(&bw_data, 0, sizeof(bw_data));
5310 bw_data.tc_valid_bits = enabled_tc;
5311 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5312 bw_data.tc_bw_credits[i] = bw_share[i];
5314 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5316 dev_info(&pf->pdev->dev,
5317 "AQ command Config VSI BW allocation per TC failed = %d\n",
5318 pf->hw.aq.asq_last_status);
5322 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5323 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5329 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5330 * @vsi: the VSI being configured
5331 * @enabled_tc: TC map to be enabled
5334 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5336 struct net_device *netdev = vsi->netdev;
5337 struct i40e_pf *pf = vsi->back;
5338 struct i40e_hw *hw = &pf->hw;
5341 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5347 netdev_reset_tc(netdev);
5351 /* Set up actual enabled TCs on the VSI */
5352 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5355 /* set per TC queues for the VSI */
5356 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5357 /* Only set TC queues for enabled TCs.
5359 * e.g. For a VSI that has TC0 and TC3 enabled, the
5360 * enabled_tc bitmap would be 0b1001 (0x9); the driver
5361 * will set numtc for the netdev to 2, and the two TCs will be
5362 * referenced by the netdev layer as TC 0 and 1.
5364 if (vsi->tc_config.enabled_tc & BIT(i))
5365 netdev_set_tc_queue(netdev,
5366 vsi->tc_config.tc_info[i].netdev_tc,
5367 vsi->tc_config.tc_info[i].qcount,
5368 vsi->tc_config.tc_info[i].qoffset);
5371 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5374 /* Assign UP2TC map for the VSI */
5375 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5376 /* Get the actual TC# for the UP */
5377 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5378 /* Get the mapped netdev TC# for the UP */
5379 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5380 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5385 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
5386 * @vsi: the VSI being configured
5387 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5389 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5390 struct i40e_vsi_context *ctxt)
5392 /* copy just the sections touched, not the entire info,
5393 * since not all sections are valid as returned by
5394 * the update VSI params AQ command.
5395 */
5396 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5397 memcpy(&vsi->info.queue_mapping,
5398 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5399 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5400 sizeof(vsi->info.tc_mapping));
5404 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5405 * @vsi: VSI to be configured
5406 * @enabled_tc: TC bitmap
5408 * This configures a particular VSI for the TCs that are mapped to the
5409 * given TC bitmap. It uses the default bandwidth share for TCs across
5410 * VSIs to configure TC for a particular VSI.
5413 * It is expected that the VSI queues have been quiesced before calling
5414 * this function.
5416 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5418 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5419 struct i40e_pf *pf = vsi->back;
5420 struct i40e_hw *hw = &pf->hw;
5421 struct i40e_vsi_context ctxt;
5425 /* Check if enabled_tc is same as existing or new TCs */
5426 if (vsi->tc_config.enabled_tc == enabled_tc &&
5427 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5430 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5431 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5432 if (enabled_tc & BIT(i))
5436 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5438 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5440 dev_info(&pf->pdev->dev,
5441 "Failed configuring TC map %d for VSI %d\n",
5442 enabled_tc, vsi->seid);
5443 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5446 dev_info(&pf->pdev->dev,
5447 "Failed querying vsi bw info, err %s aq_err %s\n",
5448 i40e_stat_str(hw, ret),
5449 i40e_aq_str(hw, hw->aq.asq_last_status));
5452 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5453 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5456 valid_tc = bw_config.tc_valid_bits;
5457 /* Always enable TC0, no matter what */
5459 dev_info(&pf->pdev->dev,
5460 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5461 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5462 enabled_tc = valid_tc;
5465 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5467 dev_err(&pf->pdev->dev,
5468 "Unable to configure TC map %d for VSI %d\n",
5469 enabled_tc, vsi->seid);
5474 /* Update Queue Pairs Mapping for currently enabled UPs */
5475 ctxt.seid = vsi->seid;
5476 ctxt.pf_num = vsi->back->hw.pf_id;
5478 ctxt.uplink_seid = vsi->uplink_seid;
5479 ctxt.info = vsi->info;
5480 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5481 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5485 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5488 /* On destroying the qdisc, reset vsi->rss_size, as the number of
5489 * enabled queues changed.
5490 */
5491 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5492 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5493 vsi->num_queue_pairs);
5494 ret = i40e_vsi_config_rss(vsi);
5496 dev_info(&vsi->back->pdev->dev,
5497 "Failed to reconfig rss for num_queues\n");
5500 vsi->reconfig_rss = false;
5502 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5503 ctxt.info.valid_sections |=
5504 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5505 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5508 /* Update the VSI after updating the VSI queue-mapping
5509 * and UP2TC information.
5510 */
5511 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5513 dev_info(&pf->pdev->dev,
5514 "Update vsi tc config failed, err %s aq_err %s\n",
5515 i40e_stat_str(hw, ret),
5516 i40e_aq_str(hw, hw->aq.asq_last_status));
5519 /* update the local VSI info with updated queue map */
5520 i40e_vsi_update_queue_map(vsi, &ctxt);
5521 vsi->info.valid_sections = 0;
5523 /* Update current VSI BW information */
5524 ret = i40e_vsi_get_bw_info(vsi);
5526 dev_info(&pf->pdev->dev,
5527 "Failed updating vsi bw info, err %s aq_err %s\n",
5528 i40e_stat_str(hw, ret),
5529 i40e_aq_str(hw, hw->aq.asq_last_status));
5533 /* Update the netdev TC setup */
5534 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5540 * i40e_get_link_speed - Returns link speed for the interface
5541 * @vsi: VSI to be configured
5544 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5546 struct i40e_pf *pf = vsi->back;
5548 switch (pf->hw.phy.link_info.link_speed) {
5549 case I40E_LINK_SPEED_40GB:
5550 return 40000;
5551 case I40E_LINK_SPEED_25GB:
5552 return 25000;
5553 case I40E_LINK_SPEED_20GB:
5554 return 20000;
5555 case I40E_LINK_SPEED_10GB:
5556 return 10000;
5557 case I40E_LINK_SPEED_1GB:
5558 return 1000;
5559 default:
5560 return -EINVAL;
5561 }
5565 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5566 * @vsi: VSI to be configured
5567 * @seid: seid of the channel/VSI
5568 * @max_tx_rate: max TX rate to be configured as BW limit
5570 * Helper function to set BW limit for a given VSI
5572 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5574 struct i40e_pf *pf = vsi->back;
5579 speed = i40e_get_link_speed(vsi);
5580 if (max_tx_rate > speed) {
5581 dev_err(&pf->pdev->dev,
5582 "Invalid max tx rate %llu specified for VSI seid %d.",
5586 if (max_tx_rate && max_tx_rate < 50) {
5587 dev_warn(&pf->pdev->dev,
5588 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5592 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
5593 credits = max_tx_rate;
5594 do_div(credits, I40E_BW_CREDIT_DIVISOR);
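/* e.g. max_tx_rate = 275 Mbps -> 275 / 50 = 5 credits; the integer
 * division discards the remainder, so the limit rounds down.
 */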
5595 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5596 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5598 dev_err(&pf->pdev->dev,
5599 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5600 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5601 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5606 * i40e_remove_queue_channels - Remove queue channels for the TCs
5607 * @vsi: VSI to be configured
5609 * Remove queue channels for the TCs
5611 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5613 enum i40e_admin_queue_err last_aq_status;
5614 struct i40e_cloud_filter *cfilter;
5615 struct i40e_channel *ch, *ch_tmp;
5616 struct i40e_pf *pf = vsi->back;
5617 struct hlist_node *node;
5618 int ret, i;
5620 /* Reset rss size that was stored when reconfiguring rss for
5621 * channel VSIs with non-power-of-2 queue count.
5623 vsi->current_rss_size = 0;
5625 /* perform cleanup for channels if they exist */
5626 if (list_empty(&vsi->ch_list))
5629 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5630 struct i40e_vsi *p_vsi;
5632 list_del(&ch->list);
5633 p_vsi = ch->parent_vsi;
5634 if (!p_vsi || !ch->initialized) {
5638 /* Reset queue contexts */
5639 for (i = 0; i < ch->num_queue_pairs; i++) {
5640 struct i40e_ring *tx_ring, *rx_ring;
5641 u16 pf_q;
5643 pf_q = ch->base_queue + i;
5644 tx_ring = vsi->tx_rings[pf_q];
5645 tx_ring->ch = NULL;
5647 rx_ring = vsi->rx_rings[pf_q];
5648 rx_ring->ch = NULL;
5651 /* Reset BW configured for this VSI via mqprio */
5652 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5654 dev_info(&vsi->back->pdev->dev,
5655 "Failed to reset tx rate for ch->seid %u\n",
5658 /* delete cloud filters associated with this channel */
5659 hlist_for_each_entry_safe(cfilter, node,
5660 &pf->cloud_filter_list, cloud_node) {
5661 if (cfilter->seid != ch->seid)
5664 hash_del(&cfilter->cloud_node);
5665 if (cfilter->dst_port)
5666 ret = i40e_add_del_cloud_filter_big_buf(vsi,
5670 ret = i40e_add_del_cloud_filter(vsi, cfilter,
5672 last_aq_status = pf->hw.aq.asq_last_status;
5674 dev_info(&pf->pdev->dev,
5675 "Failed to delete cloud filter, err %s aq_err %s\n",
5676 i40e_stat_str(&pf->hw, ret),
5677 i40e_aq_str(&pf->hw, last_aq_status));
5681 /* delete VSI from FW */
5682 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5685 dev_err(&vsi->back->pdev->dev,
5686 "unable to remove channel (%d) for parent VSI(%d)\n",
5687 ch->seid, p_vsi->seid);
5690 INIT_LIST_HEAD(&vsi->ch_list);
5694 * i40e_is_any_channel - check whether any channel exists
5695 * @vsi: ptr to the VSI with which the channels are associated
5697 * Returns true if at least one initialized channel exists for the
5698 * associated VSI, false otherwise
5699 static bool i40e_is_any_channel(struct i40e_vsi *vsi)
5701 struct i40e_channel *ch, *ch_tmp;
5703 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5704 if (ch->initialized)
5712 * i40e_get_max_queues_for_channel
5713 * @vsi: ptr to VSI to which channels are associated with
5715 * Helper function which returns max value among the queue counts set on the
5716 * channels/TCs created.
5718 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5720 struct i40e_channel *ch, *ch_tmp;
5723 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5724 if (!ch->initialized)
5726 if (ch->num_queue_pairs > max)
5727 max = ch->num_queue_pairs;
5734 * i40e_validate_num_queues - validate num_queues w.r.t channel
5735 * @pf: ptr to PF device
5736 * @num_queues: number of queues
5737 * @vsi: the parent VSI
5738 * @reconfig_rss: set to indicate whether RSS should be reconfigured
5740 * This function validates number of queues in the context of new channel
5741 * which is being established and determines if RSS should be reconfigured
5742 * or not for parent VSI.
5744 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5745 struct i40e_vsi *vsi, bool *reconfig_rss)
5752 *reconfig_rss = false;
5753 if (vsi->current_rss_size) {
5754 if (num_queues > vsi->current_rss_size) {
5755 dev_dbg(&pf->pdev->dev,
5756 "Error: num_queues (%d) > vsi's current_size(%d)\n",
5757 num_queues, vsi->current_rss_size);
5759 } else if ((num_queues < vsi->current_rss_size) &&
5760 (!is_power_of_2(num_queues))) {
5761 dev_dbg(&pf->pdev->dev,
5762 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5763 num_queues, vsi->current_rss_size);
5768 if (!is_power_of_2(num_queues)) {
5769 /* Find the max num_queues configured across existing channels.
5771 * If any channel exists, enforce 'num_queues' to be at least the
5772 * largest queue count ever configured for a channel.
5774 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5775 if (num_queues < max_ch_queues) {
5776 dev_dbg(&pf->pdev->dev,
5777 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
5778 num_queues, max_ch_queues);
5781 *reconfig_rss = true;
5788 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
5789 * @vsi: the VSI being setup
5790 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
5792 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
5794 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5796 struct i40e_pf *pf = vsi->back;
5797 u8 seed[I40E_HKEY_ARRAY_SIZE];
5798 struct i40e_hw *hw = &pf->hw;
5799 int local_rss_size;
5800 u8 *lut;
5801 int ret;
5806 if (rss_size > vsi->rss_size)
5809 local_rss_size = min_t(int, vsi->rss_size, rss_size);
5810 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5814 /* Ignore any user-configured LUT if there is one */
5815 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5817 /* Use the user-configured hash key if there is one; otherwise
5818 * fall back to the random key filled in by netdev_rss_key_fill().
5819 */
5820 if (vsi->rss_hkey_user)
5821 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5823 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5825 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5827 dev_info(&pf->pdev->dev,
5828 "Cannot set RSS lut, err %s aq_err %s\n",
5829 i40e_stat_str(hw, ret),
5830 i40e_aq_str(hw, hw->aq.asq_last_status));
5836 /* Do the update w.r.t. storing rss_size */
5837 if (!vsi->orig_rss_size)
5838 vsi->orig_rss_size = vsi->rss_size;
5839 vsi->current_rss_size = local_rss_size;
5845 * i40e_channel_setup_queue_map - Setup a channel queue map
5846 * @pf: ptr to PF device
5847 * @ctxt: VSI context structure
5848 * @ch: ptr to channel structure
5850 * Setup queue map for a specific channel
5852 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5853 struct i40e_vsi_context *ctxt,
5854 struct i40e_channel *ch)
5856 u16 qcount, qmap, sections = 0;
5857 u8 offset = 0;
5858 int pow;
5860 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5861 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5863 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5864 ch->num_queue_pairs = qcount;
5866 /* find the next higher power-of-2 of num queue pairs */
5867 pow = ilog2(qcount);
5868 if (!is_power_of_2(qcount))
5869 pow++;
5871 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5872 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
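/* e.g. qcount = 6: ilog2(6) = 2 and 6 is not a power of 2, so pow
 * becomes 3 and the map sizes the TC for 2^3 = 8 queues.
 */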
5874 /* Setup queue TC[0].qmap for given VSI context */
5875 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5877 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
5878 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5879 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
5880 ctxt->info.valid_sections |= cpu_to_le16(sections);
5884 * i40e_add_channel - add a channel by adding VSI
5885 * @pf: ptr to PF device
5886 * @uplink_seid: underlying HW switching element (VEB) ID
5887 * @ch: ptr to channel structure
5889 * Add a channel (VSI) using add_vsi and queue_map
5891 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
5892 struct i40e_channel *ch)
5894 struct i40e_hw *hw = &pf->hw;
5895 struct i40e_vsi_context ctxt;
5896 u8 enabled_tc = 0x1; /* TC0 enabled */
5899 if (ch->type != I40E_VSI_VMDQ2) {
5900 dev_info(&pf->pdev->dev,
5901 "add new vsi failed, ch->type %d\n", ch->type);
5905 memset(&ctxt, 0, sizeof(ctxt));
5906 ctxt.pf_num = hw->pf_id;
5908 ctxt.uplink_seid = uplink_seid;
5909 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5910 if (ch->type == I40E_VSI_VMDQ2)
5911 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5913 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
5914 ctxt.info.valid_sections |=
5915 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5916 ctxt.info.switch_id =
5917 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5920 /* Set queue map for a given VSI context */
5921 i40e_channel_setup_queue_map(pf, &ctxt, ch);
5923 /* Now time to create VSI */
5924 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5926 dev_info(&pf->pdev->dev,
5927 "add new vsi failed, err %s aq_err %s\n",
5928 i40e_stat_str(&pf->hw, ret),
5929 i40e_aq_str(&pf->hw,
5930 pf->hw.aq.asq_last_status));
5934 /* Success, update channel, set enabled_tc only if the channel
5935 * is not a macvlan.
5936 */
5937 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
5938 ch->seid = ctxt.seid;
5939 ch->vsi_number = ctxt.vsi_number;
5940 ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
5942 /* copy just the sections touched, not the entire info,
5943 * since not all sections are valid as returned by
5944 * the add VSI AQ command.
5945 */
5946 ch->info.mapping_flags = ctxt.info.mapping_flags;
5947 memcpy(&ch->info.queue_mapping,
5948 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
5949 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
5950 sizeof(ctxt.info.tc_mapping));
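/**
 * i40e_channel_config_bw - configure BW for the channel's VSI
 * @vsi: the VSI being set up
 * @ch: ptr to channel structure
 * @bw_share: BW share credits per traffic class
 */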
5955 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
5956 u8 *bw_share)
5958 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5959 int ret, i;
5962 memset(&bw_data, 0, sizeof(bw_data));
5963 bw_data.tc_valid_bits = ch->enabled_tc;
5964 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5965 bw_data.tc_bw_credits[i] = bw_share[i];
5967 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
5970 dev_info(&vsi->back->pdev->dev,
5971 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
5972 vsi->back->hw.aq.asq_last_status, ch->seid);
5976 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5977 ch->info.qs_handle[i] = bw_data.qs_handles[i];
5983 * i40e_channel_config_tx_ring - config TX ring associated with new channel
5984 * @pf: ptr to PF device
5985 * @vsi: the VSI being setup
5986 * @ch: ptr to channel structure
5988 * Configure the Tx rings associated with the channel (VSI), since its
5989 * queues are carved out of the parent VSI.
5990 */
5991 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
5992 struct i40e_vsi *vsi,
5993 struct i40e_channel *ch)
5996 int ret, i;
5997 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5999 /* Enable ETS TCs with equal BW Share for now across all VSIs */
6000 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6001 if (ch->enabled_tc & BIT(i))
6002 bw_share[i] = 1;
6005 /* configure BW for new VSI */
6006 ret = i40e_channel_config_bw(vsi, ch, bw_share);
6008 dev_info(&vsi->back->pdev->dev,
6009 "Failed configuring TC map %d for channel (seid %u)\n",
6010 ch->enabled_tc, ch->seid);
6014 for (i = 0; i < ch->num_queue_pairs; i++) {
6015 struct i40e_ring *tx_ring, *rx_ring;
6016 u16 pf_q;
6018 pf_q = ch->base_queue + i;
6020 /* Get the Tx ring ptr of the main VSI, to re-set up the Tx queue
6021 * context
6022 */
6023 tx_ring = vsi->tx_rings[pf_q];
6024 tx_ring->ch = ch;
6026 /* Get the Rx ring ptr */
6027 rx_ring = vsi->rx_rings[pf_q];
6028 rx_ring->ch = ch;
6035 * i40e_setup_hw_channel - setup new channel
6036 * @pf: ptr to PF device
6037 * @vsi: the VSI being setup
6038 * @ch: ptr to channel structure
6039 * @uplink_seid: underlying HW switching element (VEB) ID
6040 * @type: type of channel to be created (VMDq2/VF)
6042 * Setup new channel (VSI) based on specified type (VMDq2/VF)
6043 * and configures TX rings accordingly
6045 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
6046 struct i40e_vsi *vsi,
6047 struct i40e_channel *ch,
6048 u16 uplink_seid, u8 type)
6052 ch->initialized = false;
6053 ch->base_queue = vsi->next_base_queue;
6056 /* Proceed with creation of channel (VMDq2) VSI */
6057 ret = i40e_add_channel(pf, uplink_seid, ch);
6059 dev_info(&pf->pdev->dev,
6060 "failed to add_channel using uplink_seid %u\n",
6065 /* Mark the successful creation of channel */
6066 ch->initialized = true;
6068 /* Reconfigure TX queues using QTX_CTL register */
6069 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6071 dev_info(&pf->pdev->dev,
6072 "failed to configure TX rings for channel %u\n",
6077 /* update 'next_base_queue' */
6078 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6079 dev_dbg(&pf->pdev->dev,
6080 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6081 ch->seid, ch->vsi_number, ch->stat_counter_idx,
6082 ch->num_queue_pairs,
6083 vsi->next_base_queue);
6088 * i40e_setup_channel - setup new channel using uplink element
6089 * @pf: ptr to PF device
6090 * @vsi: pointer to the VSI to set up the channel within
6091 * @ch: ptr to channel structure
6093 * Setup new channel (VSI) based on specified type (VMDq2/VF)
6094 * and uplink switching element (uplink_seid)
6096 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6097 struct i40e_channel *ch)
6099 u8 vsi_type;
6100 u16 seid;
6101 int ret;
6103 if (vsi->type == I40E_VSI_MAIN) {
6104 vsi_type = I40E_VSI_VMDQ2;
6106 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6111 /* underlying switching element */
6112 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6114 /* create channel (VSI), configure TX rings */
6115 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6117 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6121 return ch->initialized;
6125 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6126 * @vsi: ptr to VSI which has PF backing
6128 * Sets up the switch mode correctly if it needs to be changed,
6129 * restricting it to the modes that are allowed.
6131 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6134 struct i40e_pf *pf = vsi->back;
6135 struct i40e_hw *hw = &pf->hw;
6136 int ret;
6137 u8 mode;
6138 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6142 if (hw->dev_caps.switch_mode) {
6143 /* if switch mode is set, support mode2 (non-tunneled for
6144 * cloud filter) for now
6146 u32 switch_mode = hw->dev_caps.switch_mode &
6147 I40E_SWITCH_MODE_MASK;
6148 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6149 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6150 return 0;
6151 dev_err(&pf->pdev->dev,
6152 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6153 hw->dev_caps.switch_mode);
6158 /* Set Bit 7 to be valid */
6159 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6161 /* Set L4type for TCP support */
6162 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6164 /* Set cloud filter mode */
6165 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6167 /* Prep mode field for set_switch_config */
6168 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6169 pf->last_sw_conf_valid_flags,
6171 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6172 dev_err(&pf->pdev->dev,
6173 "couldn't set switch config bits, err %s aq_err %s\n",
6174 i40e_stat_str(hw, ret),
6176 hw->aq.asq_last_status));
6182 * i40e_create_queue_channel - function to create channel
6183 * @vsi: VSI to be configured
6184 * @ch: ptr to channel (it contains channel specific params)
6186 * This function creates a channel (VSI) using the num_queues specified
6187 * by the user, and reconfigures RSS if needed.
6189 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6190 struct i40e_channel *ch)
6192 struct i40e_pf *pf = vsi->back;
6193 bool reconfig_rss;
6194 int err;
6199 if (!ch->num_queue_pairs) {
6200 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6201 ch->num_queue_pairs);
6205 /* validate user requested num_queues for channel */
6206 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6209 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6210 ch->num_queue_pairs);
6214 /* By default we are in VEPA mode; if this is the first VF/VMDq
6215 * VSI to be added, switch to VEB mode.
6217 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
6218 (!i40e_is_any_channel(vsi))) {
6219 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
6220 dev_dbg(&pf->pdev->dev,
6221 "Failed to create channel. Override queues (%u) not power of 2\n",
6222 vsi->tc_config.tc_info[0].qcount);
6226 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6227 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6229 if (vsi->type == I40E_VSI_MAIN) {
6230 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6231 i40e_do_reset(pf, I40E_PF_RESET_FLAG,
6234 i40e_do_reset_safe(pf,
6235 I40E_PF_RESET_FLAG);
6238 /* From now on, for the main VSI the number of queues will be the
6239 * value of TC0's queue count.
6243 /* By this time, vsi->cnt_q_avail should be non-zero and
6244 * at least num_queues.
6246 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6247 dev_dbg(&pf->pdev->dev,
6248 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6249 vsi->cnt_q_avail, ch->num_queue_pairs);
6253 /* reconfig_rss only if vsi type is MAIN_VSI */
6254 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6255 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6257 dev_info(&pf->pdev->dev,
6258 "Error: unable to reconfig rss for num_queues (%u)\n",
6259 ch->num_queue_pairs);
6264 if (!i40e_setup_channel(pf, vsi, ch)) {
6265 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6269 dev_info(&pf->pdev->dev,
6270 "Setup channel (id:%u) utilizing num_queues %d\n",
6271 ch->seid, ch->num_queue_pairs);
6273 /* configure VSI for BW limit */
6274 if (ch->max_tx_rate) {
6275 u64 credits = ch->max_tx_rate;
6277 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6280 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6281 dev_dbg(&pf->pdev->dev,
6282 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6288 /* in case of VF, this will be main SRIOV VSI */
6289 ch->parent_vsi = vsi;
6291 /* and update main_vsi's count for queue_available to use */
6292 vsi->cnt_q_avail -= ch->num_queue_pairs;
6298 * i40e_configure_queue_channels - Add queue channel for the given TCs
6299 * @vsi: VSI to be configured
6301 * Configures queue channel mapping to the given TCs
6303 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6305 struct i40e_channel *ch;
6306 u64 max_rate = 0;
6307 int ret = 0, i;
6309 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6310 vsi->tc_seid_map[0] = vsi->seid;
6311 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6312 if (vsi->tc_config.enabled_tc & BIT(i)) {
6313 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6319 INIT_LIST_HEAD(&ch->list);
6320 ch->num_queue_pairs =
6321 vsi->tc_config.tc_info[i].qcount;
6322 ch->base_queue =
6323 vsi->tc_config.tc_info[i].qoffset;
6325 /* Bandwidth limit through the tc interface is in bytes/s;
6326 * convert it to Mbit/s.
6327 */
6328 max_rate = vsi->mqprio_qopt.max_rate[i];
6329 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
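/* Assuming I40E_BW_MBPS_DIVISOR is the bytes/s-per-Mbit/s factor
 * (125000): e.g. 125,000,000 bytes/s / 125,000 = 1000 Mbit/s.
 */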
6330 ch->max_tx_rate = max_rate;
6332 list_add_tail(&ch->list, &vsi->ch_list);
6334 ret = i40e_create_queue_channel(vsi, ch);
6336 dev_err(&vsi->back->pdev->dev,
6337 "Failed creating queue channel with TC%d: queues %d\n",
6338 i, ch->num_queue_pairs);
6341 vsi->tc_seid_map[i] = ch->seid;
6347 i40e_remove_queue_channels(vsi);
6352 * i40e_veb_config_tc - Configure TCs for given VEB
6353 * @veb: associated VEB
6354 * @enabled_tc: TC bitmap
6356 * Configures given TC bitmap for VEB (switching) element
6358 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6360 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6361 struct i40e_pf *pf = veb->pf;
6362 int ret = 0;
6363 int i;
6365 /* No TCs or already enabled TCs just return */
6366 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6369 bw_data.tc_valid_bits = enabled_tc;
6370 /* bw_data.absolute_credits is not set (relative) */
6372 /* Enable ETS TCs with equal BW Share for now */
6373 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6374 if (enabled_tc & BIT(i))
6375 bw_data.tc_bw_share_credits[i] = 1;
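/* The credits are relative weights, so a share of 1 for every
 * enabled TC splits bandwidth evenly; e.g. enabled_tc = 0x3 gives
 * TC0 and TC1 50% each.
 */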
6378 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6381 dev_info(&pf->pdev->dev,
6382 "VEB bw config failed, err %s aq_err %s\n",
6383 i40e_stat_str(&pf->hw, ret),
6384 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6388 /* Update the BW information */
6389 ret = i40e_veb_get_bw_info(veb);
6391 dev_info(&pf->pdev->dev,
6392 "Failed getting veb bw config, err %s aq_err %s\n",
6393 i40e_stat_str(&pf->hw, ret),
6394 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6401 #ifdef CONFIG_I40E_DCB
6403 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller has quiesced all the VSIs before calling
 * this function
6410 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6416 /* Enable the TCs available on PF to all VEBs */
6417 tc_map = i40e_pf_get_tc_map(pf);
6418 if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
6421 for (v = 0; v < I40E_MAX_VEB; v++) {
6424 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6426 dev_info(&pf->pdev->dev,
6427 "Failed configuring TC for VEB seid=%d\n",
/* Will try to configure as many components as possible */
6433 /* Update each VSI */
6434 for (v = 0; v < pf->num_alloc_vsi; v++) {
/* - Enable all TCs for the LAN VSI
 * - For all others keep them at TC0 for now
 */
6441 if (v == pf->lan_vsi)
6442 tc_map = i40e_pf_get_tc_map(pf);
6444 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6446 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6448 dev_info(&pf->pdev->dev,
6449 "Failed configuring TC for VSI seid=%d\n",
/* Will try to configure as many components as possible */
6453 /* Re-configure VSI vectors based on updated TC map */
6454 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6455 if (pf->vsi[v]->netdev)
6456 i40e_dcbnl_set_all(pf->vsi[v]);
6462 * i40e_resume_port_tx - Resume port Tx
 * Resume a port's Tx and issue a PF reset in case of failure to
 * resume.
6468 static int i40e_resume_port_tx(struct i40e_pf *pf)
6470 struct i40e_hw *hw = &pf->hw;
6473 ret = i40e_aq_resume_port_tx(hw, NULL);
6475 dev_info(&pf->pdev->dev,
6476 "Resume Port Tx failed, err %s aq_err %s\n",
6477 i40e_stat_str(&pf->hw, ret),
6478 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6479 /* Schedule PF reset to recover */
6480 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6481 i40e_service_event_schedule(pf);
6488 * i40e_suspend_port_tx - Suspend port Tx
6491 * Suspend a port's Tx and issue a PF reset in case of failure.
6493 static int i40e_suspend_port_tx(struct i40e_pf *pf)
6495 struct i40e_hw *hw = &pf->hw;
6498 ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
6500 dev_info(&pf->pdev->dev,
6501 "Suspend Port Tx failed, err %s aq_err %s\n",
6502 i40e_stat_str(&pf->hw, ret),
6503 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6504 /* Schedule PF reset to recover */
6505 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6506 i40e_service_event_schedule(pf);
6513 * i40e_hw_set_dcb_config - Program new DCBX settings into HW
6514 * @pf: PF being configured
6515 * @new_cfg: New DCBX configuration
6517 * Program DCB settings into HW and reconfigure VEB/VSIs on
6518 * given PF. Uses "Set LLDP MIB" AQC to program the hardware.
6520 static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
6521 struct i40e_dcbx_config *new_cfg)
6523 struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config;
/* Check if reconfiguration is needed; compare the configs, not the
 * pointer variables themselves
 */
if (!memcmp(new_cfg, old_cfg, sizeof(*new_cfg))) {
6528 dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n");
6532 /* Config change disable all VSIs */
6533 i40e_pf_quiesce_all_vsi(pf);
6535 /* Copy the new config to the current config */
6536 *old_cfg = *new_cfg;
6537 old_cfg->etsrec = old_cfg->etscfg;
6538 ret = i40e_set_dcb_config(&pf->hw);
6540 dev_info(&pf->pdev->dev,
6541 "Set DCB Config failed, err %s aq_err %s\n",
6542 i40e_stat_str(&pf->hw, ret),
6543 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6547 /* Changes in configuration update VEB/VSI */
6548 i40e_dcb_reconfigure(pf);
6550 /* In case of reset do not try to resume anything */
6551 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
6552 /* Re-start the VSIs if disabled */
6553 ret = i40e_resume_port_tx(pf);
6554 /* In case of error no point in resuming VSIs */
6557 i40e_pf_unquiesce_all_vsi(pf);
6564 * i40e_hw_dcb_config - Program new DCBX settings into HW
6565 * @pf: PF being configured
6566 * @new_cfg: New DCBX configuration
6568 * Program DCB settings into HW and reconfigure VEB/VSIs on
6571 int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
6573 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6574 u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0};
6575 u32 mfs_tc[I40E_MAX_TRAFFIC_CLASS];
6576 struct i40e_dcbx_config *old_cfg;
6577 u8 mode[I40E_MAX_TRAFFIC_CLASS];
6578 struct i40e_rx_pb_config pb_cfg;
6579 struct i40e_hw *hw = &pf->hw;
6580 u8 num_ports = hw->num_ports;
6588 dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n");
/* Un-pack information to program ETS HW via shared API
 * ETS/NON-ETS arbiter mode
 * max exponent (credit refills)
 * Total number of ports
 * PFC priority bit-map
 * Arbiter mode between UPs sharing same TC
 * TSA table (ETS or non-ETS)
 * EEE enabled or not
 */
6604 new_numtc = i40e_dcb_get_num_tc(new_cfg);
6606 memset(&ets_data, 0, sizeof(ets_data));
6607 for (i = 0; i < new_numtc; i++) {
6609 switch (new_cfg->etscfg.tsatable[i]) {
6610 case I40E_IEEE_TSA_ETS:
6611 prio_type[i] = I40E_DCB_PRIO_TYPE_ETS;
6612 ets_data.tc_bw_share_credits[i] =
6613 new_cfg->etscfg.tcbwtable[i];
6615 case I40E_IEEE_TSA_STRICT:
6616 prio_type[i] = I40E_DCB_PRIO_TYPE_STRICT;
6618 ets_data.tc_bw_share_credits[i] =
6619 I40E_DCB_STRICT_PRIO_CREDITS;
6622 /* Invalid TSA type */
6623 need_reconfig = false;
6628 old_cfg = &hw->local_dcbx_config;
6629 /* Check if need reconfiguration */
6630 need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg);
/* If needed, enable/disable frame tagging, disable all VSIs
 * and suspend port tx
 */
6635 if (need_reconfig) {
6636 /* Enable DCB tagging only when more than one TC */
6638 pf->flags |= I40E_FLAG_DCB_ENABLED;
6640 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6642 set_bit(__I40E_PORT_SUSPENDED, pf->state);
/* Reconfiguration needed: quiesce all VSIs */
6644 i40e_pf_quiesce_all_vsi(pf);
6645 ret = i40e_suspend_port_tx(pf);
6650 /* Configure Port ETS Tx Scheduler */
6651 ets_data.tc_valid_bits = tc_map;
6652 ets_data.tc_strict_priority_flags = lltc_map;
6653 ret = i40e_aq_config_switch_comp_ets
6654 (hw, pf->mac_seid, &ets_data,
6655 i40e_aqc_opc_modify_switching_comp_ets, NULL);
6657 dev_info(&pf->pdev->dev,
6658 "Modify Port ETS failed, err %s aq_err %s\n",
6659 i40e_stat_str(&pf->hw, ret),
6660 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6664 /* Configure Rx ETS HW */
6665 memset(&mode, I40E_DCB_ARB_MODE_ROUND_ROBIN, sizeof(mode));
6666 i40e_dcb_hw_set_num_tc(hw, new_numtc);
6667 i40e_dcb_hw_rx_fifo_config(hw, I40E_DCB_ARB_MODE_ROUND_ROBIN,
6668 I40E_DCB_ARB_MODE_STRICT_PRIORITY,
6669 I40E_DCB_DEFAULT_MAX_EXPONENT,
6671 i40e_dcb_hw_rx_cmd_monitor_config(hw, new_numtc, num_ports);
6672 i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode,
6674 i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable,
6675 new_cfg->etscfg.prioritytable);
6676 i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable);
6678 /* Configure Rx Packet Buffers in HW */
6679 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6680 mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
6681 mfs_tc[i] += I40E_PACKET_HDR_PAD;
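/* Illustration (assuming I40E_PACKET_HDR_PAD covers the L2 header,
 * FCS and two VLAN tags, 26 bytes): an MTU of 1500 yields a per-TC
 * max frame size of mfs_tc[i] = 1526 bytes.
 */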
6684 i40e_dcb_hw_calculate_pool_sizes(hw, num_ports,
6685 false, new_cfg->pfc.pfcenable,
6687 i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg);
6689 /* Update the local Rx Packet buffer config */
6690 pf->pb_cfg = pb_cfg;
6692 /* Inform the FW about changes to DCB configuration */
6693 ret = i40e_aq_dcb_updated(&pf->hw, NULL);
6695 dev_info(&pf->pdev->dev,
6696 "DCB Updated failed, err %s aq_err %s\n",
6697 i40e_stat_str(&pf->hw, ret),
6698 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6702 /* Update the port DCBx configuration */
6703 *old_cfg = *new_cfg;
6705 /* Changes in configuration update VEB/VSI */
6706 i40e_dcb_reconfigure(pf);
6708 /* Re-start the VSIs if disabled */
6709 if (need_reconfig) {
6710 ret = i40e_resume_port_tx(pf);
6712 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
6713 /* In case of error no point in resuming VSIs */
6717 /* Wait for the PF's queues to be disabled */
6718 ret = i40e_pf_wait_queues_disabled(pf);
6720 /* Schedule PF reset to recover */
6721 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6722 i40e_service_event_schedule(pf);
6725 i40e_pf_unquiesce_all_vsi(pf);
6726 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6727 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
/* registers are set, let's apply */
6730 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
6731 ret = i40e_hw_set_dcb_config(pf, new_cfg);
6739 * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW
6740 * @pf: PF being queried
6742 * Set default DCB configuration in case DCB is to be done in SW.
6744 int i40e_dcb_sw_default_config(struct i40e_pf *pf)
6746 struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config;
6747 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6748 struct i40e_hw *hw = &pf->hw;
6751 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) {
6752 /* Update the local cached instance with TC0 ETS */
6753 memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
6754 pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
6755 pf->tmp_cfg.etscfg.maxtcs = 0;
6756 pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
6757 pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
6758 pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING;
6759 pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
6760 /* FW needs one App to configure HW */
6761 pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS;
6762 pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE;
6763 pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO;
6764 pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE;
6766 return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg);
6769 memset(&ets_data, 0, sizeof(ets_data));
6770 ets_data.tc_valid_bits = I40E_DEFAULT_TRAFFIC_CLASS; /* TC0 only */
6771 ets_data.tc_strict_priority_flags = 0; /* ETS */
6772 ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW; /* 100% to TC0 */
6774 /* Enable ETS on the Physical port */
6775 err = i40e_aq_config_switch_comp_ets
6776 (hw, pf->mac_seid, &ets_data,
6777 i40e_aqc_opc_enable_switching_comp_ets, NULL);
6779 dev_info(&pf->pdev->dev,
6780 "Enable Port ETS failed, err %s aq_err %s\n",
6781 i40e_stat_str(&pf->hw, err),
6782 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6787 /* Update the local cached instance with TC0 ETS */
6788 dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
6789 dcb_cfg->etscfg.cbs = 0;
6790 dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
6791 dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
6798 * i40e_init_pf_dcb - Initialize DCB configuration
6799 * @pf: PF being configured
6801 * Query the current DCB configuration and cache it
6802 * in the hardware structure
6804 static int i40e_init_pf_dcb(struct i40e_pf *pf)
6806 struct i40e_hw *hw = &pf->hw;
/* Do not enable DCB for SW1 and SW2 images even if the FW is capable.
 * Also do not enable DCBx if FW LLDP agent is disabled
 */
6812 if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
6813 dev_info(&pf->pdev->dev, "DCB is not supported.\n");
6814 err = I40E_NOT_SUPPORTED;
6817 if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
6818 dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
6819 err = i40e_dcb_sw_default_config(pf);
6821 dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n");
6824 dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n");
6825 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
6826 DCB_CAP_DCBX_VER_IEEE;
6827 /* at init capable but disabled */
6828 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6829 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6832 err = i40e_init_dcb(hw, true);
6834 /* Device/Function is not DCBX capable */
6835 if ((!hw->func_caps.dcb) ||
6836 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6837 dev_info(&pf->pdev->dev,
6838 "DCBX offload is not supported or is disabled for this PF.\n");
/* When status is not DISABLED then DCBX is managed in FW */
6841 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6842 DCB_CAP_DCBX_VER_IEEE;
6844 pf->flags |= I40E_FLAG_DCB_CAPABLE;
/* Enable DCB tagging only when more than one TC
 * or explicitly disable if only one TC
 */
6848 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6849 pf->flags |= I40E_FLAG_DCB_ENABLED;
6851 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6852 dev_dbg(&pf->pdev->dev,
6853 "DCBX offload is supported for this PF.\n");
6855 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6856 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6857 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6859 dev_info(&pf->pdev->dev,
6860 "Query for DCB configuration failed, err %s aq_err %s\n",
6861 i40e_stat_str(&pf->hw, err),
6862 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6868 #endif /* CONFIG_I40E_DCB */
6871 * i40e_set_lldp_forwarding - set forwarding of lldp frames
6872 * @pf: PF being configured
6873 * @enable: if forwarding to OS shall be enabled
 * Toggle the forwarding of LLDP frames. When passing DCB control
 * from firmware to software, LLDP frames must be forwarded to the
 * software-based LLDP agent.
6880 void i40e_set_lldp_forwarding(struct i40e_pf *pf, bool enable)
6882 if (pf->lan_vsi == I40E_NO_VSI)
6885 if (!pf->vsi[pf->lan_vsi])
/* No need to check the outcome, commands may fail
 * if desired value is already set
 */
6891 i40e_aq_add_rem_control_packet_filter(&pf->hw, NULL, ETH_P_LLDP,
6892 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX |
6893 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC,
6894 pf->vsi[pf->lan_vsi]->seid, 0,
6895 enable, NULL, NULL);
6897 i40e_aq_add_rem_control_packet_filter(&pf->hw, NULL, ETH_P_LLDP,
6898 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX |
6899 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC,
6900 pf->vsi[pf->lan_vsi]->seid, 0,
6901 enable, NULL, NULL);
6905 * i40e_print_link_message - print link up or down
6906 * @vsi: the VSI for which link needs a message
* @isup: true if link is up, false otherwise
6909 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6911 enum i40e_aq_link_speed new_speed;
6912 struct i40e_pf *pf = vsi->back;
6913 char *speed = "Unknown";
6914 char *fc = "Unknown";
6920 new_speed = pf->hw.phy.link_info.link_speed;
6922 new_speed = I40E_LINK_SPEED_UNKNOWN;
6924 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
6926 vsi->current_isup = isup;
6927 vsi->current_speed = new_speed;
6929 netdev_info(vsi->netdev, "NIC Link is Down\n");
/* Warn user if link speed on NPAR enabled partition is not at
 * least 10Gbps
 */
6936 if (pf->hw.func_caps.npar_enable &&
6937 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
6938 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
6939 netdev_warn(vsi->netdev,
6940 "The partition detected link speed that is less than 10Gbps\n");
6942 switch (pf->hw.phy.link_info.link_speed) {
6943 case I40E_LINK_SPEED_40GB:
6946 case I40E_LINK_SPEED_20GB:
6949 case I40E_LINK_SPEED_25GB:
6952 case I40E_LINK_SPEED_10GB:
6955 case I40E_LINK_SPEED_5GB:
6958 case I40E_LINK_SPEED_2_5GB:
6961 case I40E_LINK_SPEED_1GB:
6964 case I40E_LINK_SPEED_100MB:
6971 switch (pf->hw.fc.current_mode) {
6975 case I40E_FC_TX_PAUSE:
6978 case I40E_FC_RX_PAUSE:
6986 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
6991 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6994 if (pf->hw.phy.link_info.fec_info &
6995 I40E_AQ_CONFIG_FEC_KR_ENA)
6996 fec = "CL74 FC-FEC/BASE-R";
6997 else if (pf->hw.phy.link_info.fec_info &
6998 I40E_AQ_CONFIG_FEC_RS_ENA)
6999 fec = "CL108 RS-FEC";
/* 'CL108 RS-FEC' should be displayed when RS is requested, or
 * both RS and FC are requested
 */
7004 if (vsi->back->hw.phy.link_info.req_fec_info &
7005 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
7006 if (vsi->back->hw.phy.link_info.req_fec_info &
7007 I40E_AQ_REQUEST_FEC_RS)
7008 req_fec = "CL108 RS-FEC";
7010 req_fec = "CL74 FC-FEC/BASE-R";
7012 netdev_info(vsi->netdev,
7013 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7014 speed, req_fec, fec, an, fc);
7015 } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
7020 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7023 if (pf->hw.phy.link_info.fec_info &
7024 I40E_AQ_CONFIG_FEC_KR_ENA)
7025 fec = "CL74 FC-FEC/BASE-R";
7027 if (pf->hw.phy.link_info.req_fec_info &
7028 I40E_AQ_REQUEST_FEC_KR)
7029 req_fec = "CL74 FC-FEC/BASE-R";
7031 netdev_info(vsi->netdev,
7032 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7033 speed, req_fec, fec, an, fc);
7035 netdev_info(vsi->netdev,
7036 "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
7043 * i40e_up_complete - Finish the last steps of bringing up a connection
7044 * @vsi: the VSI being configured
7046 static int i40e_up_complete(struct i40e_vsi *vsi)
7048 struct i40e_pf *pf = vsi->back;
7051 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7052 i40e_vsi_configure_msix(vsi);
7054 i40e_configure_msi_and_legacy(vsi);
7057 err = i40e_vsi_start_rings(vsi);
7061 clear_bit(__I40E_VSI_DOWN, vsi->state);
7062 i40e_napi_enable_all(vsi);
7063 i40e_vsi_enable_irq(vsi);
7065 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
7067 i40e_print_link_message(vsi, true);
7068 netif_tx_start_all_queues(vsi->netdev);
7069 netif_carrier_on(vsi->netdev);
7072 /* replay FDIR SB filters */
7073 if (vsi->type == I40E_VSI_FDIR) {
7074 /* reset fd counters */
7077 i40e_fdir_filter_restore(vsi);
/* On the next run of the service_task, notify any clients of the new
 * opened netdev
 */
7083 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
7084 i40e_service_event_schedule(pf);
7090 * i40e_vsi_reinit_locked - Reset the VSI
7091 * @vsi: the VSI being configured
7093 * Rebuild the ring structs after some configuration
7094 * has changed, e.g. MTU size.
7096 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
7098 struct i40e_pf *pf = vsi->back;
7100 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
7101 usleep_range(1000, 2000);
7105 clear_bit(__I40E_CONFIG_BUSY, pf->state);
7109 * i40e_force_link_state - Force the link status
7110 * @pf: board private structure
7111 * @is_up: whether the link state should be forced up or down
7113 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
7115 struct i40e_aq_get_phy_abilities_resp abilities;
7116 struct i40e_aq_set_phy_config config = {0};
7117 bool non_zero_phy_type = is_up;
7118 struct i40e_hw *hw = &pf->hw;
/* Card might've been put in an unstable state by other drivers
 * and applications, which can cause incorrect speed values to be
 * set on startup. In order to clear speed registers, we call
 * get_phy_capabilities twice, once to get initial state of
 * available speeds, and once to get current PHY config.
 */
7129 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
7132 dev_err(&pf->pdev->dev,
7133 "failed to get phy cap., ret = %s last_status = %s\n",
7134 i40e_stat_str(hw, err),
7135 i40e_aq_str(hw, hw->aq.asq_last_status));
7138 speed = abilities.link_speed;
7140 /* Get the current phy config */
7141 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
7144 dev_err(&pf->pdev->dev,
7145 "failed to get phy cap., ret = %s last_status = %s\n",
7146 i40e_stat_str(hw, err),
7147 i40e_aq_str(hw, hw->aq.asq_last_status));
/* If link needs to go up, but was not forced to go down,
 * and its speed values are OK, no need for a flap;
 * if non_zero_phy_type was set, still need to force up
 */
7155 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
7156 non_zero_phy_type = true;
7157 else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
7158 return I40E_SUCCESS;
/* To force link we need to set bits for all supported PHY types,
 * but there are now more than 32, so we need to split the bitmap
 * across two fields.
 */
7164 mask = I40E_PHY_TYPES_BITMASK;
7166 non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
7167 config.phy_type_ext =
7168 non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
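/* Example of the split: a PHY type with bit index 35 in
 * I40E_PHY_TYPES_BITMASK cannot fit in the 32-bit phy_type field,
 * so it is carried in phy_type_ext as bit (35 - 32) = 3.
 */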
/* Copy the old settings, except for phy_type */
7170 config.abilities = abilities.abilities;
7171 if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
7173 config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
7175 config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
7177 if (abilities.link_speed != 0)
7178 config.link_speed = abilities.link_speed;
7180 config.link_speed = speed;
7181 config.eee_capability = abilities.eee_capability;
7182 config.eeer = abilities.eeer_val;
7183 config.low_power_ctrl = abilities.d3_lpan;
7184 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
7185 I40E_AQ_PHY_FEC_CONFIG_MASK;
7186 err = i40e_aq_set_phy_config(hw, &config, NULL);
7189 dev_err(&pf->pdev->dev,
7190 "set phy config ret = %s last_status = %s\n",
7191 i40e_stat_str(&pf->hw, err),
7192 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7196 /* Update the link info */
7197 err = i40e_update_link_info(hw);
/* Wait a little bit (on 40G cards it sometimes takes a really
 * long time for link to come back from the atomic reset)
 */
7204 i40e_update_link_info(hw);
7207 i40e_aq_set_link_restart_an(hw, is_up, NULL);
7209 return I40E_SUCCESS;
7213 * i40e_up - Bring the connection back up after being down
7214 * @vsi: the VSI being configured
7216 int i40e_up(struct i40e_vsi *vsi)
7220 if (vsi->type == I40E_VSI_MAIN &&
7221 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7222 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7223 i40e_force_link_state(vsi->back, true);
7225 err = i40e_vsi_configure(vsi);
7227 err = i40e_up_complete(vsi);
7233 * i40e_down - Shutdown the connection processing
7234 * @vsi: the VSI being stopped
7236 void i40e_down(struct i40e_vsi *vsi)
/* It is assumed that the caller of this function
 * sets the vsi->state __I40E_VSI_DOWN bit.
 */
7244 netif_carrier_off(vsi->netdev);
7245 netif_tx_disable(vsi->netdev);
7247 i40e_vsi_disable_irq(vsi);
7248 i40e_vsi_stop_rings(vsi);
7249 if (vsi->type == I40E_VSI_MAIN &&
7250 (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
7251 vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
7252 i40e_force_link_state(vsi->back, false);
7253 i40e_napi_disable_all(vsi);
7255 for (i = 0; i < vsi->num_queue_pairs; i++) {
7256 i40e_clean_tx_ring(vsi->tx_rings[i]);
7257 if (i40e_enabled_xdp_vsi(vsi)) {
/* Make sure that in-progress ndo_xdp_xmit and
 * ndo_xsk_wakeup calls are completed.
 */
7262 i40e_clean_tx_ring(vsi->xdp_rings[i]);
7264 i40e_clean_rx_ring(vsi->rx_rings[i]);
* i40e_validate_mqprio_qopt - validate queue mapping info
* @vsi: the VSI being configured
* @mqprio_qopt: queue parameters
7274 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
7275 struct tc_mqprio_qopt_offload *mqprio_qopt)
7277 u64 sum_max_rate = 0;
7281 if (mqprio_qopt->qopt.offset[0] != 0 ||
7282 mqprio_qopt->qopt.num_tc < 1 ||
7283 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
7285 for (i = 0; ; i++) {
7286 if (!mqprio_qopt->qopt.count[i])
7288 if (mqprio_qopt->min_rate[i]) {
7289 dev_err(&vsi->back->pdev->dev,
7290 "Invalid min tx rate (greater than 0) specified\n");
7293 max_rate = mqprio_qopt->max_rate[i];
7294 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
7295 sum_max_rate += max_rate;
7297 if (i >= mqprio_qopt->qopt.num_tc - 1)
7299 if (mqprio_qopt->qopt.offset[i + 1] !=
7300 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7303 if (vsi->num_queue_pairs <
7304 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
7307 if (sum_max_rate > i40e_get_link_speed(vsi)) {
7308 dev_err(&vsi->back->pdev->dev,
7309 "Invalid max tx rate specified\n");
7316 * i40e_vsi_set_default_tc_config - set default values for tc configuration
7317 * @vsi: the VSI being configured
7319 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
7324 /* Only TC0 is enabled */
7325 vsi->tc_config.numtc = 1;
7326 vsi->tc_config.enabled_tc = 1;
7327 qcount = min_t(int, vsi->alloc_queue_pairs,
7328 i40e_pf_get_max_q_per_tc(vsi->back));
7329 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
/* For the TC that is not enabled set the offset to the default
 * queue and allocate one queue for the given TC.
 */
7333 vsi->tc_config.tc_info[i].qoffset = 0;
7335 vsi->tc_config.tc_info[i].qcount = qcount;
7337 vsi->tc_config.tc_info[i].qcount = 1;
7338 vsi->tc_config.tc_info[i].netdev_tc = 0;
7343 * i40e_del_macvlan_filter
7344 * @hw: pointer to the HW structure
7345 * @seid: seid of the channel VSI
7346 * @macaddr: the mac address to apply as a filter
7347 * @aq_err: store the admin Q error
7349 * This function deletes a mac filter on the channel VSI which serves as the
7350 * macvlan. Returns 0 on success.
7352 static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
7353 const u8 *macaddr, int *aq_err)
7355 struct i40e_aqc_remove_macvlan_element_data element;
7358 memset(&element, 0, sizeof(element));
7359 ether_addr_copy(element.mac_addr, macaddr);
7360 element.vlan_tag = 0;
7361 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7362 status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
7363 *aq_err = hw->aq.asq_last_status;
7369 * i40e_add_macvlan_filter
7370 * @hw: pointer to the HW structure
7371 * @seid: seid of the channel VSI
7372 * @macaddr: the mac address to apply as a filter
7373 * @aq_err: store the admin Q error
7375 * This function adds a mac filter on the channel VSI which serves as the
7376 * macvlan. Returns 0 on success.
7378 static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
7379 const u8 *macaddr, int *aq_err)
7381 struct i40e_aqc_add_macvlan_element_data element;
7385 ether_addr_copy(element.mac_addr, macaddr);
7386 element.vlan_tag = 0;
7387 element.queue_number = 0;
7388 element.match_method = I40E_AQC_MM_ERR_NO_RES;
7389 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
7390 element.flags = cpu_to_le16(cmd_flags);
7391 status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
7392 *aq_err = hw->aq.asq_last_status;
7398 * i40e_reset_ch_rings - Reset the queue contexts in a channel
7399 * @vsi: the VSI we want to access
7400 * @ch: the channel we want to access
7402 static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
7404 struct i40e_ring *tx_ring, *rx_ring;
7408 for (i = 0; i < ch->num_queue_pairs; i++) {
7409 pf_q = ch->base_queue + i;
7410 tx_ring = vsi->tx_rings[pf_q];
7412 rx_ring = vsi->rx_rings[pf_q];
7418 * i40e_free_macvlan_channels
7419 * @vsi: the VSI we want to access
7421 * This function frees the Qs of the channel VSI from
7422 * the stack and also deletes the channel VSIs which
7423 * serve as macvlans.
7425 static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
7427 struct i40e_channel *ch, *ch_tmp;
7430 if (list_empty(&vsi->macvlan_list))
7433 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7434 struct i40e_vsi *parent_vsi;
7436 if (i40e_is_channel_macvlan(ch)) {
7437 i40e_reset_ch_rings(vsi, ch);
7438 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7439 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
7440 netdev_set_sb_channel(ch->fwd->netdev, 0);
7445 list_del(&ch->list);
7446 parent_vsi = ch->parent_vsi;
7447 if (!parent_vsi || !ch->initialized) {
7452 /* remove the VSI */
7453 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7456 dev_err(&vsi->back->pdev->dev,
7457 "unable to remove channel (%d) for parent VSI(%d)\n",
7458 ch->seid, parent_vsi->seid);
7461 vsi->macvlan_cnt = 0;
7465 * i40e_fwd_ring_up - bring the macvlan device up
7466 * @vsi: the VSI we want to access
7467 * @vdev: macvlan netdevice
7468 * @fwd: the private fwd structure
7470 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7471 struct i40e_fwd_adapter *fwd)
7473 int ret = 0, num_tc = 1, i, aq_err;
7474 struct i40e_channel *ch, *ch_tmp;
7475 struct i40e_pf *pf = vsi->back;
7476 struct i40e_hw *hw = &pf->hw;
7478 if (list_empty(&vsi->macvlan_list))
7481 /* Go through the list and find an available channel */
7482 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7483 if (!i40e_is_channel_macvlan(ch)) {
7485 /* record configuration for macvlan interface in vdev */
7486 for (i = 0; i < num_tc; i++)
7487 netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7489 ch->num_queue_pairs,
7491 for (i = 0; i < ch->num_queue_pairs; i++) {
7492 struct i40e_ring *tx_ring, *rx_ring;
7495 pf_q = ch->base_queue + i;
7497 /* Get to TX ring ptr */
7498 tx_ring = vsi->tx_rings[pf_q];
7501 /* Get the RX ring ptr */
7502 rx_ring = vsi->rx_rings[pf_q];
/* Guarantee all rings are updated before we update the
 * MAC address filter.
 */
7514 /* Add a mac filter */
7515 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7517 /* if we cannot add the MAC rule then disable the offload */
7518 macvlan_release_l2fw_offload(vdev);
7519 for (i = 0; i < ch->num_queue_pairs; i++) {
7520 struct i40e_ring *rx_ring;
7523 pf_q = ch->base_queue + i;
7524 rx_ring = vsi->rx_rings[pf_q];
7525 rx_ring->netdev = NULL;
7527 dev_info(&pf->pdev->dev,
7528 "Error adding mac filter on macvlan err %s, aq_err %s\n",
7529 i40e_stat_str(hw, ret),
7530 i40e_aq_str(hw, aq_err));
netdev_err(vdev, "L2fwd offload disabled due to L2 filter error\n");
7538 * i40e_setup_macvlans - create the channels which will be macvlans
7539 * @vsi: the VSI we want to access
* @macvlan_cnt: number of macvlans to be set up
* @qcnt: number of queues per macvlan
7542 * @vdev: macvlan netdevice
7544 static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7545 struct net_device *vdev)
7547 struct i40e_pf *pf = vsi->back;
7548 struct i40e_hw *hw = &pf->hw;
7549 struct i40e_vsi_context ctxt;
7550 u16 sections, qmap, num_qps;
7551 struct i40e_channel *ch;
7552 int i, pow, ret = 0;
7555 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7558 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
7560 /* find the next higher power-of-2 of num queue pairs */
7561 pow = fls(roundup_pow_of_two(num_qps) - 1);
7563 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7564 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
7566 /* Setup context bits for the main VSI */
7567 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7568 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7569 memset(&ctxt, 0, sizeof(ctxt));
7570 ctxt.seid = vsi->seid;
7571 ctxt.pf_num = vsi->back->hw.pf_id;
7573 ctxt.uplink_seid = vsi->uplink_seid;
7574 ctxt.info = vsi->info;
7575 ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7576 ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7577 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7578 ctxt.info.valid_sections |= cpu_to_le16(sections);
7580 /* Reconfigure RSS for main VSI with new max queue count */
7581 vsi->rss_size = max_t(u16, num_qps, qcnt);
7582 ret = i40e_vsi_config_rss(vsi);
7584 dev_info(&pf->pdev->dev,
7585 "Failed to reconfig RSS for num_queues (%u)\n",
7589 vsi->reconfig_rss = true;
7590 dev_dbg(&vsi->back->pdev->dev,
7591 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7592 vsi->next_base_queue = num_qps;
7593 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
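/* Hypothetical sizing: on a 64-queue main VSI, 16 macvlans of
 * 2 queues each leave num_qps = 32 for the PF data path, so
 * rss_size = max(32, 2) = 32 and 32 queues stay available for the
 * channels created below.
 */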
/* Update the VSI after updating the VSI queue-mapping
 * information
 */
7598 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7600 dev_info(&pf->pdev->dev,
7601 "Update vsi tc config failed, err %s aq_err %s\n",
7602 i40e_stat_str(hw, ret),
7603 i40e_aq_str(hw, hw->aq.asq_last_status));
7606 /* update the local VSI info with updated queue map */
7607 i40e_vsi_update_queue_map(vsi, &ctxt);
7608 vsi->info.valid_sections = 0;
7610 /* Create channels for macvlans */
7611 INIT_LIST_HEAD(&vsi->macvlan_list);
7612 for (i = 0; i < macvlan_cnt; i++) {
7613 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7618 INIT_LIST_HEAD(&ch->list);
7619 ch->num_queue_pairs = qcnt;
7620 if (!i40e_setup_channel(pf, vsi, ch)) {
7625 ch->parent_vsi = vsi;
7626 vsi->cnt_q_avail -= ch->num_queue_pairs;
7628 list_add_tail(&ch->list, &vsi->macvlan_list);
7634 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7635 i40e_free_macvlan_channels(vsi);
7641 * i40e_fwd_add - configure macvlans
7642 * @netdev: net device to configure
7643 * @vdev: macvlan netdevice
7645 static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7647 struct i40e_netdev_priv *np = netdev_priv(netdev);
7648 u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7649 struct i40e_vsi *vsi = np->vsi;
7650 struct i40e_pf *pf = vsi->back;
7651 struct i40e_fwd_adapter *fwd;
7652 int avail_macvlan, ret;
7654 if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
7655 netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7656 return ERR_PTR(-EINVAL);
7658 if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
7659 netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7660 return ERR_PTR(-EINVAL);
7662 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7663 netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7664 return ERR_PTR(-EINVAL);
/* The macvlan device has to be a single Q device so that the
 * tc_to_txq field can be reused to pick the tx queue.
 */
7670 if (netif_is_multiqueue(vdev))
7671 return ERR_PTR(-ERANGE);
7673 if (!vsi->macvlan_cnt) {
7674 /* reserve bit 0 for the pf device */
7675 set_bit(0, vsi->fwd_bitmask);
/* Try to reserve as many queues as possible for macvlans. First
 * reserve 3/4th of max vectors, then half, then quarter and
 * calculate Qs per macvlan as you go
 */
7681 vectors = pf->num_lan_msix;
7682 if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
7683 /* allocate 4 Qs per macvlan and 32 Qs to the PF*/
7685 macvlan_cnt = (vectors - 32) / 4;
7686 } else if (vectors <= 64 && vectors > 32) {
7687 /* allocate 2 Qs per macvlan and 16 Qs to the PF*/
7689 macvlan_cnt = (vectors - 16) / 2;
7690 } else if (vectors <= 32 && vectors > 16) {
7691 /* allocate 1 Q per macvlan and 16 Qs to the PF*/
7693 macvlan_cnt = vectors - 16;
7694 } else if (vectors <= 16 && vectors > 8) {
7695 /* allocate 1 Q per macvlan and 8 Qs to the PF */
7697 macvlan_cnt = vectors - 8;
7699 /* allocate 1 Q per macvlan and 1 Q to the PF */
7701 macvlan_cnt = vectors - 1;
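/* Worked example: with pf->num_lan_msix = 48 the (32, 64] branch
 * above applies, so each macvlan gets 2 queues and
 * macvlan_cnt = (48 - 16) / 2 = 16, leaving 16 queues to the PF.
 */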
7704 if (macvlan_cnt == 0)
7705 return ERR_PTR(-EBUSY);
7707 /* Quiesce VSI queues */
7708 i40e_quiesce_vsi(vsi);
7710 /* sets up the macvlans but does not "enable" them */
7711 ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
7714 return ERR_PTR(ret);
7717 i40e_unquiesce_vsi(vsi);
7719 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
7721 if (avail_macvlan >= I40E_MAX_MACVLANS)
7722 return ERR_PTR(-EBUSY);
7724 /* create the fwd struct */
7725 fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
7727 return ERR_PTR(-ENOMEM);
7729 set_bit(avail_macvlan, vsi->fwd_bitmask);
7730 fwd->bit_no = avail_macvlan;
7731 netdev_set_sb_channel(vdev, avail_macvlan);
7734 if (!netif_running(netdev))
7737 /* Set fwd ring up */
7738 ret = i40e_fwd_ring_up(vsi, vdev, fwd);
7740 /* unbind the queues and drop the subordinate channel config */
7741 netdev_unbind_sb_channel(netdev, vdev);
7742 netdev_set_sb_channel(vdev, 0);
7745 return ERR_PTR(-EINVAL);
7752 * i40e_del_all_macvlans - Delete all the mac filters on the channels
7753 * @vsi: the VSI we want to access
7755 static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
7757 struct i40e_channel *ch, *ch_tmp;
7758 struct i40e_pf *pf = vsi->back;
7759 struct i40e_hw *hw = &pf->hw;
7760 int aq_err, ret = 0;
7762 if (list_empty(&vsi->macvlan_list))
7765 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7766 if (i40e_is_channel_macvlan(ch)) {
7767 ret = i40e_del_macvlan_filter(hw, ch->seid,
7768 i40e_channel_mac(ch),
7771 /* Reset queue contexts */
7772 i40e_reset_ch_rings(vsi, ch);
7773 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7774 netdev_unbind_sb_channel(vsi->netdev,
7776 netdev_set_sb_channel(ch->fwd->netdev, 0);
7785 * i40e_fwd_del - delete macvlan interfaces
7786 * @netdev: net device to configure
7787 * @vdev: macvlan netdevice
7789 static void i40e_fwd_del(struct net_device *netdev, void *vdev)
7791 struct i40e_netdev_priv *np = netdev_priv(netdev);
7792 struct i40e_fwd_adapter *fwd = vdev;
7793 struct i40e_channel *ch, *ch_tmp;
7794 struct i40e_vsi *vsi = np->vsi;
7795 struct i40e_pf *pf = vsi->back;
7796 struct i40e_hw *hw = &pf->hw;
7797 int aq_err, ret = 0;
7799 /* Find the channel associated with the macvlan and del mac filter */
7800 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7801 if (i40e_is_channel_macvlan(ch) &&
7802 ether_addr_equal(i40e_channel_mac(ch),
7803 fwd->netdev->dev_addr)) {
7804 ret = i40e_del_macvlan_filter(hw, ch->seid,
7805 i40e_channel_mac(ch),
7808 /* Reset queue contexts */
7809 i40e_reset_ch_rings(vsi, ch);
7810 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7811 netdev_unbind_sb_channel(netdev, fwd->netdev);
7812 netdev_set_sb_channel(fwd->netdev, 0);
7816 dev_info(&pf->pdev->dev,
7817 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
7818 i40e_stat_str(hw, ret),
7819 i40e_aq_str(hw, aq_err));
7827 * i40e_setup_tc - configure multiple traffic classes
7828 * @netdev: net device to configure
7829 * @type_data: tc offload data
7831 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
7833 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
7834 struct i40e_netdev_priv *np = netdev_priv(netdev);
7835 struct i40e_vsi *vsi = np->vsi;
7836 struct i40e_pf *pf = vsi->back;
7837 u8 enabled_tc = 0, num_tc, hw;
7838 bool need_reset = false;
7839 int old_queue_pairs;
7844 old_queue_pairs = vsi->num_queue_pairs;
7845 num_tc = mqprio_qopt->qopt.num_tc;
7846 hw = mqprio_qopt->qopt.hw;
7847 mode = mqprio_qopt->mode;
7849 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7850 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
7854 /* Check if MFP enabled */
7855 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
7857 "Configuring TC not supported in MFP mode\n");
7861 case TC_MQPRIO_MODE_DCB:
7862 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7864 /* Check if DCB enabled to continue */
7865 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
7867 "DCB is not enabled for adapter\n");
7871 /* Check whether tc count is within enabled limit */
7872 if (num_tc > i40e_pf_get_num_tc(pf)) {
7874 "TC count greater than enabled on link for adapter\n");
7878 case TC_MQPRIO_MODE_CHANNEL:
7879 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
7881 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
7884 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7886 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
7889 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
7890 sizeof(*mqprio_qopt));
7891 pf->flags |= I40E_FLAG_TC_MQPRIO;
7892 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7899 /* Generate TC map for number of tc requested */
7900 for (i = 0; i < num_tc; i++)
7901 enabled_tc |= BIT(i);
7903 /* Requesting same TC configuration as already enabled */
7904 if (enabled_tc == vsi->tc_config.enabled_tc &&
7905 mode != TC_MQPRIO_MODE_CHANNEL)
7908 /* Quiesce VSI queues */
7909 i40e_quiesce_vsi(vsi);
7911 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
7912 i40e_remove_queue_channels(vsi);
7914 /* Configure VSI for enabled TCs */
7915 ret = i40e_vsi_config_tc(vsi, enabled_tc);
7917 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
7922 dev_info(&vsi->back->pdev->dev,
7923 "Setup channel (id:%u) utilizing num_queues %d\n",
7924 vsi->seid, vsi->tc_config.tc_info[0].qcount);
7927 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
7928 if (vsi->mqprio_qopt.max_rate[0]) {
7929 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
7931 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
7932 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
7934 u64 credits = max_tx_rate;
7936 do_div(credits, I40E_BW_CREDIT_DIVISOR);
7937 dev_dbg(&vsi->back->pdev->dev,
7938 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
7947 ret = i40e_configure_queue_channels(vsi);
7949 vsi->num_queue_pairs = old_queue_pairs;
7951 "Failed configuring queue channels\n");
7958 /* Reset the configuration data to defaults, only TC0 is enabled */
7960 i40e_vsi_set_default_tc_config(vsi);
7965 i40e_unquiesce_vsi(vsi);
7970 * i40e_set_cld_element - sets cloud filter element data
7971 * @filter: cloud filter rule
7972 * @cld: ptr to cloud filter element data
* This is a helper function to copy data into the cloud filter element
7977 i40e_set_cld_element(struct i40e_cloud_filter *filter,
7978 struct i40e_aqc_cloud_filters_element_data *cld)
7983 memset(cld, 0, sizeof(*cld));
7984 ether_addr_copy(cld->outer_mac, filter->dst_mac);
7985 ether_addr_copy(cld->inner_mac, filter->src_mac);
7987 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
7990 if (filter->n_proto == ETH_P_IPV6) {
7991 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
7992 for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
7994 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
7995 ipa = cpu_to_le32(ipa);
7996 memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
7999 ipa = be32_to_cpu(filter->dst_ipv4);
8000 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
8003 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
/* tenant_id is not supported by FW now, once the support is enabled
 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
 */
8008 if (filter->tenant_id)
8013 * i40e_add_del_cloud_filter - Add/del cloud filter
8014 * @vsi: pointer to VSI
8015 * @filter: cloud filter rule
8016 * @add: if true, add, if false, delete
8018 * Add or delete a cloud filter for a specific flow spec.
* Returns 0 if the filter was successfully added.
8021 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
8022 struct i40e_cloud_filter *filter, bool add)
8024 struct i40e_aqc_cloud_filters_element_data cld_filter;
8025 struct i40e_pf *pf = vsi->back;
8027 static const u16 flag_table[128] = {
8028 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
8029 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
8030 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
8031 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
8032 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
8033 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
8034 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
8035 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
8036 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
8037 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
8038 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
8039 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
8040 [I40E_CLOUD_FILTER_FLAGS_IIP] =
8041 I40E_AQC_ADD_CLOUD_FILTER_IIP,
8044 if (filter->flags >= ARRAY_SIZE(flag_table))
8045 return I40E_ERR_CONFIG;
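/* flag_table maps the driver's internal field-flag combination
 * directly to the AQ filter type, e.g. a filter keyed on inner MAC
 * plus inner VLAN (I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN) selects
 * I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN below.
 */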
8047 /* copy element needed to add cloud filter from filter */
8048 i40e_set_cld_element(filter, &cld_filter);
8050 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
8051 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
8052 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
8054 if (filter->n_proto == ETH_P_IPV6)
8055 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8056 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8058 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8059 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8062 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
8065 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
8068 dev_dbg(&pf->pdev->dev,
8069 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
8070 add ? "add" : "delete", filter->dst_port, ret,
8071 pf->hw.aq.asq_last_status);
8073 dev_info(&pf->pdev->dev,
8074 "%s cloud filter for VSI: %d\n",
8075 add ? "Added" : "Deleted", filter->seid);
8080 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
8081 * @vsi: pointer to VSI
8082 * @filter: cloud filter rule
8083 * @add: if true, add, if false, delete
8085 * Add or delete a cloud filter for a specific flow spec using big buffer.
* Returns 0 if the filter was successfully added.
8088 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
8089 struct i40e_cloud_filter *filter,
8092 struct i40e_aqc_cloud_filters_element_bb cld_filter;
8093 struct i40e_pf *pf = vsi->back;
8096 /* Both (src/dst) valid mac_addr are not supported */
8097 if ((is_valid_ether_addr(filter->dst_mac) &&
8098 is_valid_ether_addr(filter->src_mac)) ||
8099 (is_multicast_ether_addr(filter->dst_mac) &&
8100 is_multicast_ether_addr(filter->src_mac)))
/* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
 * ports are not supported via big buffer now.
 */
8106 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
8109 /* adding filter using src_port/src_ip is not supported at this stage */
8110 if (filter->src_port || filter->src_ipv4 ||
8111 !ipv6_addr_any(&filter->ip.v6.src_ip6))
8114 /* copy element needed to add cloud filter from filter */
8115 i40e_set_cld_element(filter, &cld_filter.element);
8117 if (is_valid_ether_addr(filter->dst_mac) ||
8118 is_valid_ether_addr(filter->src_mac) ||
8119 is_multicast_ether_addr(filter->dst_mac) ||
8120 is_multicast_ether_addr(filter->src_mac)) {
8121 /* MAC + IP : unsupported mode */
8122 if (filter->dst_ipv4)
/* since we validated that L4 port must be valid before
 * we get here, start with respective "flags" value
 * and update if vlan is present or not
 */
8129 cld_filter.element.flags =
8130 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
8132 if (filter->vlan_id) {
8133 cld_filter.element.flags =
8134 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
8137 } else if (filter->dst_ipv4 ||
8138 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
8139 cld_filter.element.flags =
8140 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
8141 if (filter->n_proto == ETH_P_IPV6)
8142 cld_filter.element.flags |=
8143 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8145 cld_filter.element.flags |=
8146 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8148 dev_err(&pf->pdev->dev,
8149 "either mac or ip has to be valid for cloud filter\n");
8153 /* Now copy L4 port in Byte 6..7 in general fields */
8154 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
8155 be16_to_cpu(filter->dst_port);
8158 /* Validate current device switch mode, change if necessary */
8159 ret = i40e_validate_and_set_switch_mode(vsi);
8161 dev_err(&pf->pdev->dev,
8162 "failed to set switch mode, ret %d\n",
8167 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
8170 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
8175 dev_dbg(&pf->pdev->dev,
8176 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
8177 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
8179 dev_info(&pf->pdev->dev,
8180 "%s cloud filter for VSI: %d, L4 port: %d\n",
8181 add ? "add" : "delete", filter->seid,
8182 ntohs(filter->dst_port));
8187 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
8188 * @vsi: Pointer to VSI
8189 * @f: Pointer to struct flow_cls_offload
8190 * @filter: Pointer to cloud filter structure
8193 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
8194 struct flow_cls_offload *f,
8195 struct i40e_cloud_filter *filter)
8197 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
8198 struct flow_dissector *dissector = rule->match.dissector;
8199 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
8200 struct i40e_pf *pf = vsi->back;
8203 if (dissector->used_keys &
8204 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
8205 BIT(FLOW_DISSECTOR_KEY_BASIC) |
8206 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
8207 BIT(FLOW_DISSECTOR_KEY_VLAN) |
8208 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
8209 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
8210 BIT(FLOW_DISSECTOR_KEY_PORTS) |
8211 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
8212 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
8213 dissector->used_keys);
8217 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
8218 struct flow_match_enc_keyid match;
8220 flow_rule_match_enc_keyid(rule, &match);
8221 if (match.mask->keyid != 0)
8222 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
8224 filter->tenant_id = be32_to_cpu(match.key->keyid);
8227 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
8228 struct flow_match_basic match;
8230 flow_rule_match_basic(rule, &match);
8231 n_proto_key = ntohs(match.key->n_proto);
8232 n_proto_mask = ntohs(match.mask->n_proto);
8234 if (n_proto_key == ETH_P_ALL) {
8238 filter->n_proto = n_proto_key & n_proto_mask;
8239 filter->ip_proto = match.key->ip_proto;
8242 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
8243 struct flow_match_eth_addrs match;
8245 flow_rule_match_eth_addrs(rule, &match);
8247 /* use is_broadcast and is_zero to check for all 0xf or 0 */
8248 if (!is_zero_ether_addr(match.mask->dst)) {
8249 if (is_broadcast_ether_addr(match.mask->dst)) {
8250 field_flags |= I40E_CLOUD_FIELD_OMAC;
8252 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
8254 return I40E_ERR_CONFIG;
8258 if (!is_zero_ether_addr(match.mask->src)) {
8259 if (is_broadcast_ether_addr(match.mask->src)) {
8260 field_flags |= I40E_CLOUD_FIELD_IMAC;
8262 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
8264 return I40E_ERR_CONFIG;
8267 ether_addr_copy(filter->dst_mac, match.key->dst);
8268 ether_addr_copy(filter->src_mac, match.key->src);
8271 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
8272 struct flow_match_vlan match;
8274 flow_rule_match_vlan(rule, &match);
8275 if (match.mask->vlan_id) {
8276 if (match.mask->vlan_id == VLAN_VID_MASK) {
8277 field_flags |= I40E_CLOUD_FIELD_IVLAN;
8280 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
8281 match.mask->vlan_id);
8282 return I40E_ERR_CONFIG;
8286 filter->vlan_id = cpu_to_be16(match.key->vlan_id);
8289 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
8290 struct flow_match_control match;
8292 flow_rule_match_control(rule, &match);
8293 addr_type = match.key->addr_type;
8296 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
8297 struct flow_match_ipv4_addrs match;
8299 flow_rule_match_ipv4_addrs(rule, &match);
8300 if (match.mask->dst) {
8301 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
8302 field_flags |= I40E_CLOUD_FIELD_IIP;
8304 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
8306 return I40E_ERR_CONFIG;
8310 if (match.mask->src) {
8311 if (match.mask->src == cpu_to_be32(0xffffffff)) {
8312 field_flags |= I40E_CLOUD_FIELD_IIP;
8314 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
8316 return I40E_ERR_CONFIG;
8320 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
8321 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
8322 return I40E_ERR_CONFIG;
8324 filter->dst_ipv4 = match.key->dst;
8325 filter->src_ipv4 = match.key->src;
8328 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
8329 struct flow_match_ipv6_addrs match;
8331 flow_rule_match_ipv6_addrs(rule, &match);
/* src and dest IPV6 address should not be LOOPBACK
 * (0:0:0:0:0:0:0:1), which can be represented as ::1
 */
8336 if (ipv6_addr_loopback(&match.key->dst) ||
8337 ipv6_addr_loopback(&match.key->src)) {
8338 dev_err(&pf->pdev->dev,
8339 "Bad ipv6, addr is LOOPBACK\n");
8340 return I40E_ERR_CONFIG;
8342 if (!ipv6_addr_any(&match.mask->dst) ||
8343 !ipv6_addr_any(&match.mask->src))
8344 field_flags |= I40E_CLOUD_FIELD_IIP;
8346 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
8347 sizeof(filter->src_ipv6));
8348 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
8349 sizeof(filter->dst_ipv6));
8352 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
8353 struct flow_match_ports match;
8355 flow_rule_match_ports(rule, &match);
8356 if (match.mask->src) {
8357 if (match.mask->src == cpu_to_be16(0xffff)) {
8358 field_flags |= I40E_CLOUD_FIELD_IIP;
8360 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
8361 be16_to_cpu(match.mask->src));
8362 return I40E_ERR_CONFIG;
8366 if (match.mask->dst) {
8367 if (match.mask->dst == cpu_to_be16(0xffff)) {
8368 field_flags |= I40E_CLOUD_FIELD_IIP;
8370 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
8371 be16_to_cpu(match.mask->dst));
8372 return I40E_ERR_CONFIG;
8376 filter->dst_port = match.key->dst;
8377 filter->src_port = match.key->src;
8379 switch (filter->ip_proto) {
8384 dev_err(&pf->pdev->dev,
8385 "Only UDP and TCP transport are supported\n");
8389 filter->flags = field_flags;
8394 * i40e_handle_tclass: Forward to a traffic class on the device
8395 * @vsi: Pointer to VSI
8396 * @tc: traffic class index on the device
8397 * @filter: Pointer to cloud filter structure
8400 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
8401 struct i40e_cloud_filter *filter)
8403 struct i40e_channel *ch, *ch_tmp;
8405 /* direct to a traffic class on the same device */
8407 filter->seid = vsi->seid;
8409 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
8410 if (!filter->dst_port) {
8411 dev_err(&vsi->back->pdev->dev,
8412 "Specify destination port to direct to traffic class that is not default\n");
8415 if (list_empty(&vsi->ch_list))
8417 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
8419 if (ch->seid == vsi->tc_seid_map[tc])
8420 filter->seid = ch->seid;
8424 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
8429 * i40e_configure_clsflower - Configure tc flower filters
8430 * @vsi: Pointer to VSI
8431 * @cls_flower: Pointer to struct flow_cls_offload
8434 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
8435 struct flow_cls_offload *cls_flower)
8437 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
8438 struct i40e_cloud_filter *filter = NULL;
8439 struct i40e_pf *pf = vsi->back;
8443 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
8447 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
8448 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
8451 if (pf->fdir_pf_active_filters ||
8452 (!hlist_empty(&pf->fdir_filter_list))) {
8453 dev_err(&vsi->back->pdev->dev,
8454 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
8458 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
8459 dev_err(&vsi->back->pdev->dev,
8460 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
8461 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8462 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8465 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
8469 filter->cookie = cls_flower->cookie;
8471 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
8475 err = i40e_handle_tclass(vsi, tc, filter);
8479 /* Add cloud filter */
8480 if (filter->dst_port)
8481 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
8483 err = i40e_add_del_cloud_filter(vsi, filter, true);
8486 dev_err(&pf->pdev->dev,
8487 "Failed to add cloud filter, err %s\n",
8488 i40e_stat_str(&pf->hw, err));
8492 /* add filter to the ordered list */
8493 INIT_HLIST_NODE(&filter->cloud_node);
8495 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
8497 pf->num_cloud_filters++;
* i40e_find_cloud_filter - Find the cloud filter in the list
8507 * @vsi: Pointer to VSI
8508 * @cookie: filter specific cookie
8511 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8512 unsigned long *cookie)
8514 struct i40e_cloud_filter *filter = NULL;
8515 struct hlist_node *node2;
8517 hlist_for_each_entry_safe(filter, node2,
8518 &vsi->back->cloud_filter_list, cloud_node)
8519 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
8525 * i40e_delete_clsflower - Remove tc flower filters
8526 * @vsi: Pointer to VSI
8527 * @cls_flower: Pointer to struct flow_cls_offload
8530 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8531 struct flow_cls_offload *cls_flower)
8533 struct i40e_cloud_filter *filter = NULL;
8534 struct i40e_pf *pf = vsi->back;
8537 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
8542 hash_del(&filter->cloud_node);
8544 if (filter->dst_port)
8545 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
8547 err = i40e_add_del_cloud_filter(vsi, filter, false);
8551 dev_err(&pf->pdev->dev,
8552 "Failed to delete cloud filter, err %s\n",
8553 i40e_stat_str(&pf->hw, err));
8554 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
8557 pf->num_cloud_filters--;
8558 if (!pf->num_cloud_filters)
8559 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8560 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8561 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8562 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8563 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8569 * i40e_setup_tc_cls_flower - flower classifier offloads
8570 * @np: net device to configure
8571 * @cls_flower: offload data
8573 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8574 struct flow_cls_offload *cls_flower)
8576 struct i40e_vsi *vsi = np->vsi;
8578 switch (cls_flower->command) {
8579 case FLOW_CLS_REPLACE:
8580 return i40e_configure_clsflower(vsi, cls_flower);
8581 case FLOW_CLS_DESTROY:
8582 return i40e_delete_clsflower(vsi, cls_flower);
8583 case FLOW_CLS_STATS:
8584 return -EOPNOTSUPP;
8585 default:
8586 return -EOPNOTSUPP;
8587 }
8588 }
8590 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8593 struct i40e_netdev_priv *np = cb_priv;
8595 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8596 return -EOPNOTSUPP;
8598 switch (type) {
8599 case TC_SETUP_CLSFLOWER:
8600 return i40e_setup_tc_cls_flower(np, type_data);
8601 default:
8602 return -EOPNOTSUPP;
8603 }
8604 }
8607 static LIST_HEAD(i40e_block_cb_list);
8609 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8612 struct i40e_netdev_priv *np = netdev_priv(netdev);
8615 case TC_SETUP_QDISC_MQPRIO:
8616 return i40e_setup_tc(netdev, type_data);
8617 case TC_SETUP_BLOCK:
8618 return flow_block_cb_setup_simple(type_data,
8619 &i40e_block_cb_list,
8620 i40e_setup_tc_block_cb,
8621 np, np, true);
8622 default:
8623 return -EOPNOTSUPP;
8624 }
8625 }
8628 * i40e_open - Called when a network interface is made active
8629 * @netdev: network interface device structure
8631 * The open entry point is called when a network interface is made
8632 * active by the system (IFF_UP). At this point all resources needed
8633 * for transmit and receive operations are allocated, the interrupt
8634 * handler is registered with the OS, the netdev watchdog subtask is
8635 * enabled, and the stack is notified that the interface is ready.
8637 * Returns 0 on success, negative value on failure
8639 int i40e_open(struct net_device *netdev)
8641 struct i40e_netdev_priv *np = netdev_priv(netdev);
8642 struct i40e_vsi *vsi = np->vsi;
8643 struct i40e_pf *pf = vsi->back;
8646 /* disallow open during test or if eeprom is broken */
8647 if (test_bit(__I40E_TESTING, pf->state) ||
8648 test_bit(__I40E_BAD_EEPROM, pf->state))
8649 return -EBUSY;
8651 netif_carrier_off(netdev);
8653 if (i40e_force_link_state(pf, true))
8654 return -EAGAIN;
8656 err = i40e_vsi_open(vsi);
8657 if (err)
8658 return err;
8660 /* configure global TSO hardware offload settings */
8661 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
8662 TCP_FLAG_FIN) >> 16);
8663 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
8664 TCP_FLAG_FIN |
8665 TCP_FLAG_CWR) >> 16);
8666 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
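/* Editor's note (editorial summary, hedged): the three TSOMSK writes
 * above appear to program which TCP flags the hardware suppresses in
 * the first (F), middle (M) and last (L) segments it emits while
 * segmenting a TSO send, e.g. PSH/FIN kept only on the last segment.
 * be32_to_cpu(TCP_FLAG_*) >> 16 merely moves the wire-format flag
 * constants into the low bits the registers expect (TCP_FLAG_PSH is
 * htonl(0x00080000), so the shift yields 0x8).
 */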
8667 udp_tunnel_get_rx_info(netdev);
8669 return 0;
8670 }
8673 * i40e_vsi_open - finish VSI initialization and bring the VSI up
8674 * @vsi: the VSI to open
8676 * Finish initialization of the VSI.
8678 * Returns 0 on success, negative value on failure
8680 * Note: expects to be called while under rtnl_lock()
8682 int i40e_vsi_open(struct i40e_vsi *vsi)
8684 struct i40e_pf *pf = vsi->back;
8685 char int_name[I40E_INT_NAME_STR_LEN];
8688 /* allocate descriptors */
8689 err = i40e_vsi_setup_tx_resources(vsi);
8690 if (err)
8691 goto err_setup_tx;
8692 err = i40e_vsi_setup_rx_resources(vsi);
8693 if (err)
8694 goto err_setup_rx;
8696 err = i40e_vsi_configure(vsi);
8697 if (err)
8698 goto err_setup_rx;
8700 if (vsi->netdev) {
8701 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
8702 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
8703 err = i40e_vsi_request_irq(vsi, int_name);
8704 if (err)
8705 goto err_setup_rx;
8707 /* Notify the stack of the actual queue counts. */
8708 err = netif_set_real_num_tx_queues(vsi->netdev,
8709 vsi->num_queue_pairs);
8710 if (err)
8711 goto err_set_queues;
8713 err = netif_set_real_num_rx_queues(vsi->netdev,
8714 vsi->num_queue_pairs);
8715 if (err)
8716 goto err_set_queues;
8718 } else if (vsi->type == I40E_VSI_FDIR) {
8719 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
8720 dev_driver_string(&pf->pdev->dev),
8721 dev_name(&pf->pdev->dev));
8722 err = i40e_vsi_request_irq(vsi, int_name);
8723 if (err)
8724 goto err_setup_rx;
8725 } else {
8726 err = -EINVAL;
8727 goto err_setup_rx;
8728 }
8729 err = i40e_up_complete(vsi);
8730 if (err)
8731 goto err_up_complete;
8733 return 0;
8735 err_up_complete:
8736 i40e_down(vsi);
8737 err_set_queues:
8738 i40e_vsi_free_irq(vsi);
8739 err_setup_rx:
8740 i40e_vsi_free_rx_resources(vsi);
8741 err_setup_tx:
8742 i40e_vsi_free_tx_resources(vsi);
8743 if (vsi == pf->vsi[pf->lan_vsi])
8744 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
8746 return err;
8747 }
8750 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
8751 * @pf: Pointer to PF
8753 * This function destroys the hlist where all the Flow Director
8754 * filters were saved.
8756 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
8758 struct i40e_fdir_filter *filter;
8759 struct i40e_flex_pit *pit_entry, *tmp;
8760 struct hlist_node *node2;
8762 hlist_for_each_entry_safe(filter, node2,
8763 &pf->fdir_filter_list, fdir_node) {
8764 hlist_del(&filter->fdir_node);
8768 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
8769 list_del(&pit_entry->list);
8772 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
8774 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
8775 list_del(&pit_entry->list);
8778 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
8780 pf->fdir_pf_active_filters = 0;
8781 i40e_reset_fdir_filter_cnt(pf);
8783 /* Reprogram the default input set for TCP/IPv4 */
8784 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8785 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8786 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8788 /* Reprogram the default input set for TCP/IPv6 */
8789 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
8790 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8791 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8793 /* Reprogram the default input set for UDP/IPv4 */
8794 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
8795 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8796 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8798 /* Reprogram the default input set for UDP/IPv6 */
8799 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
8800 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8801 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8803 /* Reprogram the default input set for SCTP/IPv4 */
8804 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
8805 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8806 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8808 /* Reprogram the default input set for SCTP/IPv6 */
8809 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
8810 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
8811 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8813 /* Reprogram the default input set for Other/IPv4 */
8814 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
8815 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8817 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
8818 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8820 /* Reprogram the default input set for Other/IPv6 */
8821 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
8822 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8824 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
8825 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
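/* Editor's note: the "default input set" restored above is the classic
 * 4-tuple (src/dst IP plus src/dst L4 port) for the TCP, UDP and SCTP
 * flow types, and the IP address pair alone for the Other/Fragmented
 * types, i.e. the state the hardware assumes before any user-defined
 * flex offsets are programmed.
 */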
8829 * i40e_cloud_filter_exit - Cleans up the cloud filters
8830 * @pf: Pointer to PF
8832 * This function destroys the hlist where all the cloud filters
8833 * were saved.
8835 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
8837 struct i40e_cloud_filter *cfilter;
8838 struct hlist_node *node;
8840 hlist_for_each_entry_safe(cfilter, node,
8841 &pf->cloud_filter_list, cloud_node) {
8842 hlist_del(&cfilter->cloud_node);
8845 pf->num_cloud_filters = 0;
8847 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8848 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8849 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8850 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8851 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8856 * i40e_close - Disables a network interface
8857 * @netdev: network interface device structure
8859 * The close entry point is called when an interface is de-activated
8860 * by the OS. The hardware is still under the driver's control, but
8861 * this netdev interface is disabled.
8863 * Returns 0, this is not allowed to fail
8865 int i40e_close(struct net_device *netdev)
8867 struct i40e_netdev_priv *np = netdev_priv(netdev);
8868 struct i40e_vsi *vsi = np->vsi;
8870 i40e_vsi_close(vsi);
8876 * i40e_do_reset - Start a PF or Core Reset sequence
8877 * @pf: board private structure
8878 * @reset_flags: which reset is requested
8879 * @lock_acquired: indicates whether or not the lock has been acquired
8880 * before this function was called.
8882 * The essential difference in resets is that the PF Reset
8883 * doesn't clear the packet buffers, doesn't reset the PE
8884 * firmware, and doesn't bother the other PFs on the chip.
8886 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
8890 /* do the biggest reset indicated */
8891 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
8893 /* Request a Global Reset
8895 * This will start the chip's countdown to the actual full
8896 * chip reset event, and a warning interrupt to be sent
8897 * to all PFs, including the requestor. Our handler
8898 * for the warning interrupt will deal with the shutdown
8899 * and recovery of the switch setup.
8901 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
8902 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8903 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
8904 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8906 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
8908 /* Request a Core Reset
8910 * Same as Global Reset, except does *not* include the MAC/PHY
8912 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
8913 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8914 val |= I40E_GLGEN_RTRIG_CORER_MASK;
8915 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8916 i40e_flush(&pf->hw);
8918 } else if (reset_flags & I40E_PF_RESET_FLAG) {
8920 /* Request a PF Reset
8922 * Resets only the PF-specific registers
8924 * This goes directly to the tear-down and rebuild of
8925 * the switch, since we need to do all the recovery as
8926 * for the Core Reset.
8928 dev_dbg(&pf->pdev->dev, "PFR requested\n");
8929 i40e_handle_reset_warning(pf, lock_acquired);
8931 dev_info(&pf->pdev->dev,
8932 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
8933 "FW LLDP is disabled\n" :
8934 "FW LLDP is enabled\n");
8936 } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
8937 /* Request a PF Reset
8939 * Resets PF and reinitializes PFs VSI.
8941 i40e_prep_for_reset(pf);
8942 i40e_reset_and_rebuild(pf, true, lock_acquired);
8944 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
8947 /* Find the VSI(s) that requested a re-init */
8948 dev_info(&pf->pdev->dev,
8949 "VSI reinit requested\n");
8950 for (v = 0; v < pf->num_alloc_vsi; v++) {
8951 struct i40e_vsi *vsi = pf->vsi[v];
8954 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
8956 i40e_vsi_reinit_locked(pf->vsi[v]);
8958 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
8961 /* Find the VSI(s) that needs to be brought down */
8962 dev_info(&pf->pdev->dev, "VSI down requested\n");
8963 for (v = 0; v < pf->num_alloc_vsi; v++) {
8964 struct i40e_vsi *vsi = pf->vsi[v];
8967 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
8969 set_bit(__I40E_VSI_DOWN, vsi->state);
8974 dev_info(&pf->pdev->dev,
8975 "bad reset request 0x%08x\n", reset_flags);
8979 #ifdef CONFIG_I40E_DCB
8981 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
8982 * @pf: board private structure
8983 * @old_cfg: current DCB config
8984 * @new_cfg: new DCB config
8986 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
8987 struct i40e_dcbx_config *old_cfg,
8988 struct i40e_dcbx_config *new_cfg)
8990 bool need_reconfig = false;
8992 /* Check if ETS configuration has changed */
8993 if (memcmp(&new_cfg->etscfg,
8994 &old_cfg->etscfg,
8995 sizeof(new_cfg->etscfg))) {
8996 /* If Priority Table has changed reconfig is needed */
8997 if (memcmp(&new_cfg->etscfg.prioritytable,
8998 &old_cfg->etscfg.prioritytable,
8999 sizeof(new_cfg->etscfg.prioritytable))) {
9000 need_reconfig = true;
9001 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
9004 if (memcmp(&new_cfg->etscfg.tcbwtable,
9005 &old_cfg->etscfg.tcbwtable,
9006 sizeof(new_cfg->etscfg.tcbwtable)))
9007 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
9009 if (memcmp(&new_cfg->etscfg.tsatable,
9010 &old_cfg->etscfg.tsatable,
9011 sizeof(new_cfg->etscfg.tsatable)))
9012 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
9015 /* Check if PFC configuration has changed */
9016 if (memcmp(&new_cfg->pfc,
9017 &old_cfg->pfc,
9018 sizeof(new_cfg->pfc))) {
9019 need_reconfig = true;
9020 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
9023 /* Check if APP Table has changed */
9024 if (memcmp(&new_cfg->app,
9025 &old_cfg->app,
9026 sizeof(new_cfg->app))) {
9027 need_reconfig = true;
9028 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
9031 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
9032 return need_reconfig;
9036 * i40e_handle_lldp_event - Handle LLDP Change MIB event
9037 * @pf: board private structure
9038 * @e: event info posted on ARQ
9040 static int i40e_handle_lldp_event(struct i40e_pf *pf,
9041 struct i40e_arq_event_info *e)
9043 struct i40e_aqc_lldp_get_mib *mib =
9044 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
9045 struct i40e_hw *hw = &pf->hw;
9046 struct i40e_dcbx_config tmp_dcbx_cfg;
9047 bool need_reconfig = false;
9051 /* X710-T*L 2.5G and 5G speeds don't support DCB */
9052 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9053 (hw->phy.link_info.link_speed &
9054 ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
9055 !(pf->flags & I40E_FLAG_DCB_CAPABLE))
9056 /* let firmware decide if the DCB should be disabled */
9057 pf->flags |= I40E_FLAG_DCB_CAPABLE;
9059 /* Not DCB capable or capability disabled */
9060 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
9061 return ret;
9063 /* Ignore if event is not for Nearest Bridge */
9064 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
9065 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
9066 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
9067 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
9068 return ret;
9070 /* Check MIB Type and return if event for Remote MIB update */
9071 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
9072 dev_dbg(&pf->pdev->dev,
9073 "LLDP event mib type %s\n", type ? "remote" : "local");
9074 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
9075 /* Update the remote cached instance and return */
9076 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
9077 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
9078 &hw->remote_dcbx_config);
9079 goto exit;
9080 }
9082 /* Store the old configuration */
9083 tmp_dcbx_cfg = hw->local_dcbx_config;
9085 /* Reset the old DCBx configuration data */
9086 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
9087 /* Get updated DCBX data from firmware */
9088 ret = i40e_get_dcb_config(&pf->hw);
9090 /* X710-T*L 2.5G and 5G speeds don't support DCB */
9091 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9092 (hw->phy.link_info.link_speed &
9093 (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
9094 dev_warn(&pf->pdev->dev,
9095 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
9096 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9098 dev_info(&pf->pdev->dev,
9099 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
9100 i40e_stat_str(&pf->hw, ret),
9101 i40e_aq_str(&pf->hw,
9102 pf->hw.aq.asq_last_status));
9107 /* No change detected in DCBX configs */
9108 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
9109 sizeof(tmp_dcbx_cfg))) {
9110 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
9114 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
9115 &hw->local_dcbx_config);
9117 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
9122 /* Enable DCB tagging only when more than one TC */
9123 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
9124 pf->flags |= I40E_FLAG_DCB_ENABLED;
9126 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9128 set_bit(__I40E_PORT_SUSPENDED, pf->state);
9129 /* Reconfiguration needed, so quiesce all VSIs */
9130 i40e_pf_quiesce_all_vsi(pf);
9132 /* Changes in configuration update VEB/VSI */
9133 i40e_dcb_reconfigure(pf);
9135 ret = i40e_resume_port_tx(pf);
9137 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
9138 /* In case of error no point in resuming VSIs */
9142 /* Wait for the PF's queues to be disabled */
9143 ret = i40e_pf_wait_queues_disabled(pf);
9145 /* Schedule PF reset to recover */
9146 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9147 i40e_service_event_schedule(pf);
9149 i40e_pf_unquiesce_all_vsi(pf);
9150 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
9151 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
9157 #endif /* CONFIG_I40E_DCB */
9160 * i40e_do_reset_safe - Protected reset path for userland calls.
9161 * @pf: board private structure
9162 * @reset_flags: which reset is requested
9165 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
9168 i40e_do_reset(pf, reset_flags, true);
9173 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
9174 * @pf: board private structure
9175 * @e: event info posted on ARQ
9177 * Handler for LAN Queue Overflow Event generated by the firmware for PF
9178 * and VF queues
9180 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
9181 struct i40e_arq_event_info *e)
9183 struct i40e_aqc_lan_overflow *data =
9184 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
9185 u32 queue = le32_to_cpu(data->prtdcb_rupto);
9186 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
9187 struct i40e_hw *hw = &pf->hw;
9191 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
9194 /* Queue belongs to VF, find the VF and issue VF reset */
9195 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
9196 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
9197 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
9198 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
9199 vf_id -= hw->func_caps.vf_base_id;
9200 vf = &pf->vf[vf_id];
9201 i40e_vc_notify_vf_reset(vf);
9202 /* Allow VF to process pending reset notification */
9203 msleep(20);
9204 i40e_reset_vf(vf, false);
9209 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
9210 * @pf: board private structure
9212 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
9216 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9217 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
9218 return fcnt_prog;
9219 }
9222 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
9223 * @pf: board private structure
9225 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
9229 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9230 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
9231 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
9232 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
9233 return fcnt_prog;
9234 }
9237 * i40e_get_global_fd_count - Get total FD filters programmed on device
9238 * @pf: board private structure
9240 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
9244 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
9245 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
9246 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
9247 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
9248 return fcnt_prog;
9249 }
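/* Editor's note: both counters above add the "guaranteed" filters
 * (reserved per function) to the "best effort" ones taken from shared
 * device space; PFQF_FDSTAT is the per-PF view, GLQF_FDCNT_0 the
 * device-wide one.
 */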
9252 * i40e_reenable_fdir_sb - Restore FDir SB capability
9253 * @pf: board private structure
9255 static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
9257 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
9258 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
9259 (I40E_DEBUG_FD & pf->hw.debug_mask))
9260 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
9264 * i40e_reenable_fdir_atr - Restore FDir ATR capability
9265 * @pf: board private structure
9267 static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
9269 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
9270 /* ATR uses the same filtering logic as SB rules. It only
9271 * functions properly if the input set mask is at the default
9272 * settings. It is safe to restore the default input set
9273 * because there are no active TCPv4 filter rules.
9275 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9276 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9277 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9279 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
9280 (I40E_DEBUG_FD & pf->hw.debug_mask))
9281 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
9286 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
9287 * @pf: board private structure
9288 * @filter: FDir filter to remove
9290 static void i40e_delete_invalid_filter(struct i40e_pf *pf,
9291 struct i40e_fdir_filter *filter)
9293 /* Update counters */
9294 pf->fdir_pf_active_filters--;
9297 switch (filter->flow_type) {
9299 pf->fd_tcp4_filter_cnt--;
9302 pf->fd_udp4_filter_cnt--;
9305 pf->fd_sctp4_filter_cnt--;
9308 pf->fd_tcp6_filter_cnt--;
9311 pf->fd_udp6_filter_cnt--;
9313 case SCTP_V6_FLOW:
9314 pf->fd_sctp6_filter_cnt--;
9317 switch (filter->ipl4_proto) {
9319 pf->fd_tcp4_filter_cnt--;
9322 pf->fd_udp4_filter_cnt--;
9325 pf->fd_sctp4_filter_cnt--;
9328 pf->fd_ip4_filter_cnt--;
9332 case IPV6_USER_FLOW:
9333 switch (filter->ipl4_proto) {
9335 pf->fd_tcp6_filter_cnt--;
9338 pf->fd_udp6_filter_cnt--;
9341 pf->fd_sctp6_filter_cnt--;
9344 pf->fd_ip6_filter_cnt--;
9350 /* Remove the filter from the list and free memory */
9351 hlist_del(&filter->fdir_node);
9352 kfree(filter);
9353 }
9356 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
9357 * @pf: board private structure
9359 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
9361 struct i40e_fdir_filter *filter;
9362 u32 fcnt_prog, fcnt_avail;
9363 struct hlist_node *node;
9365 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9368 /* Check if we have enough room to re-enable FDir SB capability. */
9369 fcnt_prog = i40e_get_global_fd_count(pf);
9370 fcnt_avail = pf->fdir_pf_filter_count;
9371 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
9372 (pf->fd_add_err == 0) ||
9373 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
9374 i40e_reenable_fdir_sb(pf);
9376 /* We should wait for even more space before re-enabling ATR.
9377 * Additionally, we cannot enable ATR as long as we still have TCP SB
9378 * rules.
9380 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
9381 pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
9382 i40e_reenable_fdir_atr(pf);
9384 /* if hw had a problem adding a filter, delete it */
9385 if (pf->fd_inv > 0) {
9386 hlist_for_each_entry_safe(filter, node,
9387 &pf->fdir_filter_list, fdir_node)
9388 if (filter->fd_id == pf->fd_inv)
9389 i40e_delete_invalid_filter(pf, filter);
9393 #define I40E_MIN_FD_FLUSH_INTERVAL 10
9394 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
9396 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
9397 * @pf: board private structure
9399 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
9401 unsigned long min_flush_time;
9402 int flush_wait_retry = 50;
9403 bool disable_atr = false;
9407 if (!time_after(jiffies, pf->fd_flush_timestamp +
9408 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
9411 /* If the flush is happening too quick and we have mostly SB rules we
9412 * should not re-enable ATR for some time.
9414 min_flush_time = pf->fd_flush_timestamp +
9415 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
9416 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
9418 if (!(time_after(jiffies, min_flush_time)) &&
9419 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
9420 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9421 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
9425 pf->fd_flush_timestamp = jiffies;
9426 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9427 /* flush all filters */
9428 wr32(&pf->hw, I40E_PFQF_CTL_1,
9429 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
9430 i40e_flush(&pf->hw);
9434 /* Check FD flush status every 5-6msec */
9435 usleep_range(5000, 6000);
9436 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
9437 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
9438 break;
9439 } while (flush_wait_retry--);
9440 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
9441 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
9443 /* replay sideband filters */
9444 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
9445 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
9446 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9447 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
9448 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9449 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
9454 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
9455 * @pf: board private structure
9457 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
9459 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
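/* Editor's note: every programmed filter that is not an explicit
 * sideband (ntuple) rule was added by ATR, hence the subtraction above
 * yields the ATR filter count.
 */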
9463 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
9464 * @pf: board private structure
9466 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
9469 /* if interface is down do nothing */
9470 if (test_bit(__I40E_DOWN, pf->state))
9473 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9474 i40e_fdir_flush_and_replay(pf);
9476 i40e_fdir_check_and_reenable(pf);
9481 * i40e_vsi_link_event - notify VSI of a link event
9482 * @vsi: vsi to be notified
9483 * @link_up: link up or down
9485 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
9487 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
9490 switch (vsi->type) {
9491 case I40E_VSI_MAIN:
9492 if (!vsi->netdev || !vsi->netdev_registered)
9493 break;
9495 if (link_up) {
9496 netif_carrier_on(vsi->netdev);
9497 netif_tx_wake_all_queues(vsi->netdev);
9498 } else {
9499 netif_carrier_off(vsi->netdev);
9500 netif_tx_stop_all_queues(vsi->netdev);
9501 }
9502 break;
9504 case I40E_VSI_SRIOV:
9505 case I40E_VSI_VMDQ2:
9506 case I40E_VSI_CTRL:
9507 case I40E_VSI_IWARP:
9508 case I40E_VSI_MIRROR:
9509 default:
9510 /* there is no notification for other VSIs */
9511 break;
9516 * i40e_veb_link_event - notify elements on the veb of a link event
9517 * @veb: veb to be notified
9518 * @link_up: link up or down
9520 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
9525 if (!veb || !veb->pf)
9529 /* depth first... */
9530 for (i = 0; i < I40E_MAX_VEB; i++)
9531 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
9532 i40e_veb_link_event(pf->veb[i], link_up);
9534 /* ... now the local VSIs */
9535 for (i = 0; i < pf->num_alloc_vsi; i++)
9536 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
9537 i40e_vsi_link_event(pf->vsi[i], link_up);
9541 * i40e_link_event - Update netif_carrier status
9542 * @pf: board private structure
9544 static void i40e_link_event(struct i40e_pf *pf)
9546 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9547 u8 new_link_speed, old_link_speed;
9549 bool new_link, old_link;
9550 #ifdef CONFIG_I40E_DCB
9552 #endif /* CONFIG_I40E_DCB */
9554 /* set this to force the get_link_status call to refresh state */
9555 pf->hw.phy.get_link_info = true;
9556 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9557 status = i40e_get_link_status(&pf->hw, &new_link);
9559 /* On success, disable temp link polling */
9560 if (status == I40E_SUCCESS) {
9561 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9563 /* Enable link polling temporarily until i40e_get_link_status
9564 * returns I40E_SUCCESS
9566 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9567 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
9572 old_link_speed = pf->hw.phy.link_info_old.link_speed;
9573 new_link_speed = pf->hw.phy.link_info.link_speed;
9575 if (new_link == old_link &&
9576 new_link_speed == old_link_speed &&
9577 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
9578 new_link == netif_carrier_ok(vsi->netdev)))
9581 i40e_print_link_message(vsi, new_link);
9583 /* Notify the base of the switch tree connected to
9584 * the link. Floating VEBs are not notified.
9586 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9587 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9589 i40e_vsi_link_event(vsi, new_link);
9592 i40e_vc_notify_link_state(pf);
9594 if (pf->flags & I40E_FLAG_PTP)
9595 i40e_ptp_set_increment(pf);
9596 #ifdef CONFIG_I40E_DCB
9597 if (new_link == old_link)
9599 /* Not SW DCB so firmware will take care of default settings */
9600 if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
9603 /* We cover here only link down, as after link up in case of SW DCB
9604 * SW LLDP agent will take care of setting it up
9607 dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n");
9608 memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
9609 err = i40e_dcb_sw_default_config(pf);
9611 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
9612 I40E_FLAG_DCB_ENABLED);
9614 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
9615 DCB_CAP_DCBX_VER_IEEE;
9616 pf->flags |= I40E_FLAG_DCB_CAPABLE;
9617 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9620 #endif /* CONFIG_I40E_DCB */
9624 * i40e_watchdog_subtask - periodic checks not using event driven response
9625 * @pf: board private structure
9627 static void i40e_watchdog_subtask(struct i40e_pf *pf)
9631 /* if interface is down do nothing */
9632 if (test_bit(__I40E_DOWN, pf->state) ||
9633 test_bit(__I40E_CONFIG_BUSY, pf->state))
9636 /* make sure we don't do these things too often */
9637 if (time_before(jiffies, (pf->service_timer_previous +
9638 pf->service_timer_period)))
9640 pf->service_timer_previous = jiffies;
9642 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
9643 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
9644 i40e_link_event(pf);
9646 /* Update the stats for active netdevs so the network stack
9647 * can look at updated numbers whenever it cares to
9649 for (i = 0; i < pf->num_alloc_vsi; i++)
9650 if (pf->vsi[i] && pf->vsi[i]->netdev)
9651 i40e_update_stats(pf->vsi[i]);
9653 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
9654 /* Update the stats for the active switching components */
9655 for (i = 0; i < I40E_MAX_VEB; i++)
9657 i40e_update_veb_stats(pf->veb[i]);
9660 i40e_ptp_rx_hang(pf);
9661 i40e_ptp_tx_hang(pf);
9665 * i40e_reset_subtask - Set up for resetting the device and driver
9666 * @pf: board private structure
9668 static void i40e_reset_subtask(struct i40e_pf *pf)
9670 u32 reset_flags = 0;
9672 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
9673 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
9674 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
9676 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
9677 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
9678 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9680 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
9681 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
9682 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
9684 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
9685 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
9686 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
9688 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
9689 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
9690 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
9693 /* If there's a recovery already waiting, it takes
9694 * precedence before starting a new reset sequence.
9696 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
9697 i40e_prep_for_reset(pf);
9699 i40e_rebuild(pf, false, false);
9702 /* If we're already down or resetting, just bail */
9703 if (reset_flags &&
9704 !test_bit(__I40E_DOWN, pf->state) &&
9705 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
9706 i40e_do_reset(pf, reset_flags, false);
9711 * i40e_handle_link_event - Handle link event
9712 * @pf: board private structure
9713 * @e: event info posted on ARQ
9715 static void i40e_handle_link_event(struct i40e_pf *pf,
9716 struct i40e_arq_event_info *e)
9718 struct i40e_aqc_get_link_status *status =
9719 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
9721 /* Do a new status request to re-enable LSE reporting
9722 * and load new status information into the hw struct
9723 * This completely ignores any state information
9724 * in the ARQ event info, instead choosing to always
9725 * issue the AQ update link status command.
9727 i40e_link_event(pf);
9729 /* Check if module meets thermal requirements */
9730 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
9731 dev_err(&pf->pdev->dev,
9732 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
9733 dev_err(&pf->pdev->dev,
9734 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9736 /* check for unqualified module, if link is down, suppress
9737 * the message if link was forced to be down.
9739 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
9740 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
9741 (!(status->link_info & I40E_AQ_LINK_UP)) &&
9742 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
9743 dev_err(&pf->pdev->dev,
9744 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
9745 dev_err(&pf->pdev->dev,
9746 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9752 * i40e_clean_adminq_subtask - Clean the AdminQ rings
9753 * @pf: board private structure
9755 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
9757 struct i40e_arq_event_info event;
9758 struct i40e_hw *hw = &pf->hw;
9765 /* Do not run clean AQ when PF reset fails */
9766 if (test_bit(__I40E_RESET_FAILED, pf->state))
9769 /* check for error indications */
9770 val = rd32(&pf->hw, pf->hw.aq.arq.len);
9772 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
9773 if (hw->debug_mask & I40E_DEBUG_AQ)
9774 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
9775 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
9777 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
9778 if (hw->debug_mask & I40E_DEBUG_AQ)
9779 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
9780 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
9781 pf->arq_overflows++;
9783 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
9784 if (hw->debug_mask & I40E_DEBUG_AQ)
9785 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
9786 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
9789 wr32(&pf->hw, pf->hw.aq.arq.len, val);
9791 val = rd32(&pf->hw, pf->hw.aq.asq.len);
9793 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
9794 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9795 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
9796 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
9798 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
9799 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9800 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
9801 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
9803 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
9804 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9805 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
9806 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
9809 wr32(&pf->hw, pf->hw.aq.asq.len, val);
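/* Editor's note: the blocks above read the queue length registers,
 * log any VF/overflow/critical error flag found, then write the value
 * back with those flags cleared, which acknowledges the errors before
 * the ARQ is drained below.
 */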
9811 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
9812 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
9817 ret = i40e_clean_arq_element(hw, &event, &pending);
9818 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
9819 break;
9820 else if (ret) {
9821 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
9822 break;
9823 }
9825 opcode = le16_to_cpu(event.desc.opcode);
9828 case i40e_aqc_opc_get_link_status:
9830 i40e_handle_link_event(pf, &event);
9833 case i40e_aqc_opc_send_msg_to_pf:
9834 ret = i40e_vc_process_vf_msg(pf,
9835 le16_to_cpu(event.desc.retval),
9836 le32_to_cpu(event.desc.cookie_high),
9837 le32_to_cpu(event.desc.cookie_low),
9841 case i40e_aqc_opc_lldp_update_mib:
9842 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
9843 #ifdef CONFIG_I40E_DCB
9844 rtnl_lock();
9845 i40e_handle_lldp_event(pf, &event);
9846 rtnl_unlock();
9847 #endif /* CONFIG_I40E_DCB */
9848 break;
9849 case i40e_aqc_opc_event_lan_overflow:
9850 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
9851 i40e_handle_lan_overflow_event(pf, &event);
9853 case i40e_aqc_opc_send_msg_to_peer:
9854 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
9856 case i40e_aqc_opc_nvm_erase:
9857 case i40e_aqc_opc_nvm_update:
9858 case i40e_aqc_opc_oem_post_update:
9859 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
9860 "ARQ NVM operation 0x%04x completed\n",
9864 dev_info(&pf->pdev->dev,
9865 "ARQ: Unknown event 0x%04x ignored\n",
9869 } while (i++ < pf->adminq_work_limit);
9871 if (i < pf->adminq_work_limit)
9872 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
9874 /* re-enable Admin queue interrupt cause */
9875 val = rd32(hw, I40E_PFINT_ICR0_ENA);
9876 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
9877 wr32(hw, I40E_PFINT_ICR0_ENA, val);
9880 kfree(event.msg_buf);
9884 * i40e_verify_eeprom - make sure eeprom is good to use
9885 * @pf: board private structure
9887 static void i40e_verify_eeprom(struct i40e_pf *pf)
9891 err = i40e_diag_eeprom_test(&pf->hw);
9892 if (err) {
9893 /* retry in case of garbage read */
9894 err = i40e_diag_eeprom_test(&pf->hw);
9895 if (err) {
9896 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
9897 err);
9898 set_bit(__I40E_BAD_EEPROM, pf->state);
9899 }
9900 }
9902 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
9903 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
9904 clear_bit(__I40E_BAD_EEPROM, pf->state);
9909 * i40e_enable_pf_switch_lb
9910 * @pf: pointer to the PF structure
9912 * enable switch loop back or die - no point in a return value
9914 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
9916 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9917 struct i40e_vsi_context ctxt;
9920 ctxt.seid = pf->main_vsi_seid;
9921 ctxt.pf_num = pf->hw.pf_id;
9923 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9925 dev_info(&pf->pdev->dev,
9926 "couldn't get PF vsi config, err %s aq_err %s\n",
9927 i40e_stat_str(&pf->hw, ret),
9928 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9931 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9932 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9933 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9935 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
9937 dev_info(&pf->pdev->dev,
9938 "update vsi switch failed, err %s aq_err %s\n",
9939 i40e_stat_str(&pf->hw, ret),
9940 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9945 * i40e_disable_pf_switch_lb
9946 * @pf: pointer to the PF structure
9948 * disable switch loop back or die - no point in a return value
9950 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
9952 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9953 struct i40e_vsi_context ctxt;
9956 ctxt.seid = pf->main_vsi_seid;
9957 ctxt.pf_num = pf->hw.pf_id;
9959 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9961 dev_info(&pf->pdev->dev,
9962 "couldn't get PF vsi config, err %s aq_err %s\n",
9963 i40e_stat_str(&pf->hw, ret),
9964 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9967 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9968 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9969 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9971 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
9973 dev_info(&pf->pdev->dev,
9974 "update vsi switch failed, err %s aq_err %s\n",
9975 i40e_stat_str(&pf->hw, ret),
9976 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9981 * i40e_config_bridge_mode - Configure the HW bridge mode
9982 * @veb: pointer to the bridge instance
9984 * Configure the loop back mode for the LAN VSI that is downlink to the
9985 * specified HW bridge instance. It is expected this function is called
9986 * when a new HW bridge is instantiated.
9988 static void i40e_config_bridge_mode(struct i40e_veb *veb)
9990 struct i40e_pf *pf = veb->pf;
9992 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
9993 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
9994 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
9995 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
9996 i40e_disable_pf_switch_lb(pf);
9998 i40e_enable_pf_switch_lb(pf);
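/* Editor's note (illustrative): the bridge mode consumed here is the
 * one userspace configures through iproute2, e.g.
 *
 *   bridge link set dev eth0 hwmode vepa
 *
 * VEPA disables PF switch loopback so frames hairpin through the
 * external switch; VEB keeps local switching inside the NIC.
 */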
10002 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
10003 * @veb: pointer to the VEB instance
10005 * This is a recursive function that first builds the attached VSIs then
10006 * recurses in to build the next layer of VEB. We track the connections
10007 * through our own index numbers because the SEIDs from the HW could
10008 * change across the reset.
10010 static int i40e_reconstitute_veb(struct i40e_veb *veb)
10012 struct i40e_vsi *ctl_vsi = NULL;
10013 struct i40e_pf *pf = veb->pf;
10017 /* build VSI that owns this VEB, temporarily attached to base VEB */
10018 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
10020 pf->vsi[v]->veb_idx == veb->idx &&
10021 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
10022 ctl_vsi = pf->vsi[v];
10027 dev_info(&pf->pdev->dev,
10028 "missing owner VSI for veb_idx %d\n", veb->idx);
10030 goto end_reconstitute;
10032 if (ctl_vsi != pf->vsi[pf->lan_vsi])
10033 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10034 ret = i40e_add_vsi(ctl_vsi);
10036 dev_info(&pf->pdev->dev,
10037 "rebuild of veb_idx %d owner VSI failed: %d\n",
10039 goto end_reconstitute;
10041 i40e_vsi_reset_stats(ctl_vsi);
10043 /* create the VEB in the switch and move the VSI onto the VEB */
10044 ret = i40e_add_veb(veb, ctl_vsi);
10046 goto end_reconstitute;
10048 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10049 veb->bridge_mode = BRIDGE_MODE_VEB;
10051 veb->bridge_mode = BRIDGE_MODE_VEPA;
10052 i40e_config_bridge_mode(veb);
10054 /* create the remaining VSIs attached to this VEB */
10055 for (v = 0; v < pf->num_alloc_vsi; v++) {
10056 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
10059 if (pf->vsi[v]->veb_idx == veb->idx) {
10060 struct i40e_vsi *vsi = pf->vsi[v];
10062 vsi->uplink_seid = veb->seid;
10063 ret = i40e_add_vsi(vsi);
10065 dev_info(&pf->pdev->dev,
10066 "rebuild of vsi_idx %d failed: %d\n",
10068 goto end_reconstitute;
10070 i40e_vsi_reset_stats(vsi);
10074 /* create any VEBs attached to this VEB - RECURSION */
10075 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10076 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
10077 pf->veb[veb_idx]->uplink_seid = veb->seid;
10078 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
10089 * i40e_get_capabilities - get info about the HW
10090 * @pf: the PF struct
10091 * @list_type: AQ capability to be queried
10093 static int i40e_get_capabilities(struct i40e_pf *pf,
10094 enum i40e_admin_queue_opc list_type)
10096 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
10101 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
10103 cap_buf = kzalloc(buf_len, GFP_KERNEL);
10107 /* this loads the data into the hw struct for us */
10108 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
10109 &data_size, list_type,
10111 /* data loaded, buffer no longer needed */
10114 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
10115 /* retry with a larger buffer */
10116 buf_len = data_size;
10117 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
10118 dev_info(&pf->pdev->dev,
10119 "capability discovery failed, err %s aq_err %s\n",
10120 i40e_stat_str(&pf->hw, err),
10121 i40e_aq_str(&pf->hw,
10122 pf->hw.aq.asq_last_status));
10127 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
10128 if (list_type == i40e_aqc_opc_list_func_capabilities) {
10129 dev_info(&pf->pdev->dev,
10130 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
10131 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
10132 pf->hw.func_caps.num_msix_vectors,
10133 pf->hw.func_caps.num_msix_vectors_vf,
10134 pf->hw.func_caps.fd_filters_guaranteed,
10135 pf->hw.func_caps.fd_filters_best_effort,
10136 pf->hw.func_caps.num_tx_qp,
10137 pf->hw.func_caps.num_vsis);
10138 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
10139 dev_info(&pf->pdev->dev,
10140 "switch_mode=0x%04x, function_valid=0x%08x\n",
10141 pf->hw.dev_caps.switch_mode,
10142 pf->hw.dev_caps.valid_functions);
10143 dev_info(&pf->pdev->dev,
10144 "SR-IOV=%d, num_vfs for all function=%u\n",
10145 pf->hw.dev_caps.sr_iov_1_1,
10146 pf->hw.dev_caps.num_vfs);
10147 dev_info(&pf->pdev->dev,
10148 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
10149 pf->hw.dev_caps.num_vsis,
10150 pf->hw.dev_caps.num_rx_qp,
10151 pf->hw.dev_caps.num_tx_qp);
10154 if (list_type == i40e_aqc_opc_list_func_capabilities) {
10155 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
10156 + pf->hw.func_caps.num_vfs)
10157 if (pf->hw.revision_id == 0 &&
10158 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
10159 dev_info(&pf->pdev->dev,
10160 "got num_vsis %d, setting num_vsis to %d\n",
10161 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
10162 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
10168 static int i40e_vsi_clear(struct i40e_vsi *vsi);
10171 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
10172 * @pf: board private structure
10174 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
10176 struct i40e_vsi *vsi;
10178 /* quick workaround for an NVM issue that leaves a critical register
10179 * uninitialized
10180 */
10181 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
10182 static const u32 hkey[] = {
10183 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
10184 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
10185 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
10189 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
10190 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
10193 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
10196 /* find existing VSI and see if it needs configuring */
10197 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10199 /* create a new VSI if none exists */
10201 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
10202 pf->vsi[pf->lan_vsi]->seid, 0);
10204 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
10205 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10206 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10211 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
10215 * i40e_fdir_teardown - release the Flow Director resources
10216 * @pf: board private structure
10218 static void i40e_fdir_teardown(struct i40e_pf *pf)
10220 struct i40e_vsi *vsi;
10222 i40e_fdir_filter_exit(pf);
10223 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10225 i40e_vsi_release(vsi);
10229 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
10230 * @vsi: PF main vsi
10231 * @seid: seid of main or channel VSIs
10233 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
10234 * existed before reset
10236 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
10238 struct i40e_cloud_filter *cfilter;
10239 struct i40e_pf *pf = vsi->back;
10240 struct hlist_node *node;
10243 /* Add cloud filters back if they exist */
10244 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
10246 if (cfilter->seid != seid)
10249 if (cfilter->dst_port)
10250 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
10253 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
10256 dev_dbg(&pf->pdev->dev,
10257 "Failed to rebuild cloud filter, err %s aq_err %s\n",
10258 i40e_stat_str(&pf->hw, ret),
10259 i40e_aq_str(&pf->hw,
10260 pf->hw.aq.asq_last_status));
10268 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
10269 * @vsi: PF main vsi
10271 * Rebuilds channel VSIs if they existed before reset
10273 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
10275 struct i40e_channel *ch, *ch_tmp;
10278 if (list_empty(&vsi->ch_list))
10281 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
10282 if (!ch->initialized)
10284 /* Proceed with creation of channel (VMDq2) VSI */
10285 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
10287 dev_info(&vsi->back->pdev->dev,
10288 "failed to rebuild channels using uplink_seid %u\n",
10292 /* Reconfigure TX queues using QTX_CTL register */
10293 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
10295 dev_info(&vsi->back->pdev->dev,
10296 "failed to configure TX rings for channel %u\n",
10300 /* update 'next_base_queue' */
10301 vsi->next_base_queue = vsi->next_base_queue +
10302 ch->num_queue_pairs;
10303 if (ch->max_tx_rate) {
10304 u64 credits = ch->max_tx_rate;
10306 if (i40e_set_bw_limit(vsi, ch->seid,
10310 do_div(credits, I40E_BW_CREDIT_DIVISOR);
10311 dev_dbg(&vsi->back->pdev->dev,
10312 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10317 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
10319 dev_dbg(&vsi->back->pdev->dev,
10320 "Failed to rebuild cloud filters for channel VSI %u\n",
10329 * i40e_prep_for_reset - prep for the core to reset
10330 * @pf: board private structure
10332 * Close up the VFs and other things in prep for PF Reset.
10334 static void i40e_prep_for_reset(struct i40e_pf *pf)
10336 struct i40e_hw *hw = &pf->hw;
10337 i40e_status ret = 0;
10340 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
10341 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
10343 if (i40e_check_asq_alive(&pf->hw))
10344 i40e_vc_notify_reset(pf);
10346 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
10348 /* quiesce the VSIs and their queues that are not already DOWN */
10349 i40e_pf_quiesce_all_vsi(pf);
10351 for (v = 0; v < pf->num_alloc_vsi; v++) {
10353 pf->vsi[v]->seid = 0;
10356 i40e_shutdown_adminq(&pf->hw);
10358 /* call shutdown HMC */
10359 if (hw->hmc.hmc_obj) {
10360 ret = i40e_shutdown_lan_hmc(hw);
10362 dev_warn(&pf->pdev->dev,
10363 "shutdown_lan_hmc failed: %d\n", ret);
10366 /* Save the current PTP time so that we can restore the time after the
10369 i40e_ptp_save_hw_time(pf);
10373 * i40e_send_version - update firmware with driver version
10376 static void i40e_send_version(struct i40e_pf *pf)
10378 struct i40e_driver_version dv;
10380 dv.major_version = 0xff;
10381 dv.minor_version = 0xff;
10382 dv.build_version = 0xff;
10383 dv.subbuild_version = 0;
10384 strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
10385 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
10389 * i40e_get_oem_version - get OEM specific version information
10390 * @hw: pointer to the hardware structure
10392 static void i40e_get_oem_version(struct i40e_hw *hw)
10394 u16 block_offset = 0xffff;
10395 u16 block_length = 0;
10396 u16 capabilities = 0;
10400 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
10401 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00
10402 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
10403 #define I40E_NVM_OEM_GEN_OFFSET 0x02
10404 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03
10405 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
10406 #define I40E_NVM_OEM_LENGTH 3
10408 /* Check if pointer to OEM version block is valid. */
10409 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
10410 if (block_offset == 0xffff)
10413 /* Check if OEM version block has correct length. */
10414 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
10416 if (block_length < I40E_NVM_OEM_LENGTH)
10419 /* Check if OEM version format is as expected. */
10420 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
10422 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
10425 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
10427 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
10429 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
10430 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
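/* Editor's note (worked example with invented values): assuming the
 * customary 16-bit I40E_OEM_SNAP_SHIFT, gen_snap = 0x0102 and
 * release = 0x0003 pack into oem_ver = 0x01020003, so one u32 carries
 * the generation/snapshot in its high half and the release in its
 * low half.
 */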
10434 * i40e_reset - wait for a core reset to finish; reset the PF if no CoreR is seen
10435 * @pf: board private structure
10437 static int i40e_reset(struct i40e_pf *pf)
10439 struct i40e_hw *hw = &pf->hw;
10442 ret = i40e_pf_reset(hw);
10444 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
10445 set_bit(__I40E_RESET_FAILED, pf->state);
10446 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
10454 * i40e_rebuild - rebuild using a saved config
10455 * @pf: board private structure
10456 * @reinit: if the Main VSI needs to be re-initialized.
10457 * @lock_acquired: indicates whether or not the lock has been acquired
10458 * before this function was called.
10460 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
10462 int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
10463 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10464 struct i40e_hw *hw = &pf->hw;
10465 u8 set_fc_aq_fail = 0;
10470 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
10471 i40e_check_recovery_mode(pf)) {
10472 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
10475 if (test_bit(__I40E_DOWN, pf->state) &&
10476 !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
10477 !old_recovery_mode_bit)
10478 goto clear_recovery;
10479 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
10481 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
10482 ret = i40e_init_adminq(&pf->hw);
10484 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
10485 i40e_stat_str(&pf->hw, ret),
10486 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10487 goto clear_recovery;
10489 i40e_get_oem_version(&pf->hw);
10491 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
10492 ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
10493 hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
10494 /* The following delay is necessary for 4.33 firmware and older
10495 * to recover after EMP reset. 200 ms should suffice but we
10496 * put here 300 ms to be sure that FW is ready to operate
10497 * after reset.
10498 */
10499 mdelay(300);
10500 }
10502 /* re-verify the eeprom if we just had an EMP reset */
10503 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
10504 i40e_verify_eeprom(pf);
10506 /* if we are going out of or into recovery mode we have to act
10507 * accordingly with regard to resources initialization
10508 * and deinitialization
10510 if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
10511 old_recovery_mode_bit) {
10512 if (i40e_get_capabilities(pf,
10513 i40e_aqc_opc_list_func_capabilities))
10516 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10517 /* we're staying in recovery mode so we'll reinitialize
10520 if (i40e_setup_misc_vector_for_recovery_mode(pf))
10521 goto end_unlock;
10522 } else {
10523 if (!lock_acquired)
10524 rtnl_lock();
10525 /* we're going out of recovery mode so we'll free
10526 * the IRQ allocated specifically for recovery mode
10527 * and restore the interrupt scheme
10529 free_irq(pf->pdev->irq, pf);
10530 i40e_clear_interrupt_scheme(pf);
10531 if (i40e_restore_interrupt_scheme(pf))
10532 goto end_unlock;
10533 }
10535 /* tell the firmware that we're starting */
10536 i40e_send_version(pf);
10538 /* bail out in case recovery mode was detected, as there is
10539 * no need for further configuration.
10544 i40e_clear_pxe_mode(hw);
10545 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
10547 goto end_core_reset;
10549 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10550 hw->func_caps.num_rx_qp, 0, 0);
10552 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
10553 goto end_core_reset;
10555 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10557 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
10558 goto end_core_reset;
10561 if (!lock_acquired)
10563 ret = i40e_setup_pf_switch(pf, reinit);
10567 #ifdef CONFIG_I40E_DCB
10568 /* Enable FW to write a default DCB config on link-up
10569 * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
10570 * is not supported with new link speed
10572 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
10573 i40e_aq_set_dcb_parameters(hw, false, NULL);
10575 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
10576 (hw->phy.link_info.link_speed &
10577 (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
10578 i40e_aq_set_dcb_parameters(hw, false, NULL);
10579 dev_warn(&pf->pdev->dev,
10580 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
10581 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10583 i40e_aq_set_dcb_parameters(hw, true, NULL);
10584 ret = i40e_init_pf_dcb(pf);
10586 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
10588 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10589 /* Continue without DCB enabled */
10594 #endif /* CONFIG_I40E_DCB */
10596 /* The driver only wants link up/down and module qualification
10597 * reports from firmware. Note the negative logic.
10599 ret = i40e_aq_set_phy_int_mask(&pf->hw,
10600 ~(I40E_AQ_EVENT_LINK_UPDOWN |
10601 I40E_AQ_EVENT_MEDIA_NA |
10602 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
10604 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10605 i40e_stat_str(&pf->hw, ret),
10606 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10608 /* make sure our flow control settings are restored */
10609 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
10611 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
10612 i40e_stat_str(&pf->hw, ret),
10613 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10615 /* Rebuild the VSIs and VEBs that existed before reset.
10616 * They are still in our local switch element arrays, so only
10617 * need to rebuild the switch model in the HW.
10619 * If there were VEBs but the reconstitution failed, we'll try
10620 * to recover minimal use by getting the basic PF VSI working.
10622 if (vsi->uplink_seid != pf->mac_seid) {
10623 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
10624 /* find the one VEB connected to the MAC, and find orphans */
10625 for (v = 0; v < I40E_MAX_VEB; v++) {
10629 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
10630 pf->veb[v]->uplink_seid == 0) {
10631 ret = i40e_reconstitute_veb(pf->veb[v]);
10636 /* If Main VEB failed, we're in deep doodoo,
10637 * so give up rebuilding the switch and set up
10638 * for minimal rebuild of PF VSI.
10639 * If orphan failed, we'll report the error
10640 * but try to keep going.
10642 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
10643 dev_info(&pf->pdev->dev,
10644 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
10646 vsi->uplink_seid = pf->mac_seid;
10648 } else if (pf->veb[v]->uplink_seid == 0) {
10649 dev_info(&pf->pdev->dev,
10650 "rebuild of orphan VEB failed: %d\n",
10657 if (vsi->uplink_seid == pf->mac_seid) {
10658 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
10659 /* no VEB, so rebuild only the Main VSI */
10660 ret = i40e_add_vsi(vsi);
10662 dev_info(&pf->pdev->dev,
10663 "rebuild of Main VSI failed: %d\n", ret);
10668 if (vsi->mqprio_qopt.max_rate[0]) {
10669 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
10672 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
10673 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
10677 credits = max_tx_rate;
10678 do_div(credits, I40E_BW_CREDIT_DIVISOR);
10679 dev_dbg(&vsi->back->pdev->dev,
10680 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10686 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
10690 /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs
10691 * for this main VSI if they exist
10693 ret = i40e_rebuild_channels(vsi);
10697 /* Reconfigure hardware for allowing smaller MSS in the case
10698 * of TSO, so that we avoid the MDD being fired and causing
10699 * a reset in the case of small MSS+TSO.
10701 #define I40E_REG_MSS 0x000E64DC
10702 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
10703 #define I40E_64BYTE_MSS 0x400000
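/* Editorial note: I40E_REG_MSS_MIN_MASK selects bits 25:16 of the
 * register, the field holding the minimum MSS the hardware accepts
 * before firing an MDD event, and I40E_64BYTE_MSS is 0x40 << 16,
 * i.e. a 64-byte floor. The read-modify-write below only ever
 * lowers the floor, and only when the current value exceeds 64.
 */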
10704 val = rd32(hw, I40E_REG_MSS);
10705 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
10706 val &= ~I40E_REG_MSS_MIN_MASK;
10707 val |= I40E_64BYTE_MSS;
10708 wr32(hw, I40E_REG_MSS, val);
10711 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
10713 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10715 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10716 i40e_stat_str(&pf->hw, ret),
10717 i40e_aq_str(&pf->hw,
10718 pf->hw.aq.asq_last_status));
10720 /* reinit the misc interrupt */
10721 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10722 ret = i40e_setup_misc_vector(pf);
10724 /* Add a filter to drop all Flow control frames from any VSI from being
10725 * transmitted. By doing so we stop a malicious VF from sending out
10726 * PAUSE or PFC frames and potentially controlling traffic for other
10727 * VF/VM VSIs.
10728 * The FW can still send Flow control frames if enabled.
10730 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
10731 pf->main_vsi_seid);
10732 #ifdef CONFIG_I40E_DCB
10733 if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
10734 i40e_set_lldp_forwarding(pf, true);
10735 #endif /* CONFIG_I40E_DCB */
10737 /* restart the VSIs that were rebuilt and running before the reset */
10738 i40e_pf_unquiesce_all_vsi(pf);
10740 /* Release the RTNL lock before we start resetting VFs */
10741 if (!lock_acquired)
10744 /* Restore promiscuous settings */
10745 ret = i40e_set_promiscuous(pf, pf->cur_promisc);
10747 dev_warn(&pf->pdev->dev,
10748 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
10749 pf->cur_promisc ? "on" : "off",
10750 i40e_stat_str(&pf->hw, ret),
10751 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10753 i40e_reset_all_vfs(pf, true);
10755 /* tell the firmware that we're starting */
10756 i40e_send_version(pf);
10758 /* We've already released the lock, so don't do it again */
10759 goto end_core_reset;
10762 if (!lock_acquired)
10765 clear_bit(__I40E_RESET_FAILED, pf->state);
10767 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
10768 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
10772 * i40e_reset_and_rebuild - reset and rebuild using a saved config
10773 * @pf: board private structure
10774 * @reinit: if the Main VSI needs to be re-initialized.
10775 * @lock_acquired: indicates whether or not the lock has been acquired
10776 * before this function was called.
10778 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
10779 bool lock_acquired)
10782 /* Now we wait for GRST to settle out.
10783 * We don't have to delete the VEBs or VSIs from the hw switch
10784 * because the reset will make them disappear.
10786 ret = i40e_reset(pf);
10788 i40e_rebuild(pf, reinit, lock_acquired);
10792 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
10793 * @pf: board private structure
10794 * @lock_acquired: indicates whether or not the lock has been acquired
10795 * before this function was called.
10796 *
10797 * Close up the VFs and other things in prep for a Core Reset,
10798 * then get ready to rebuild the world.
10800 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
10802 i40e_prep_for_reset(pf);
10803 i40e_reset_and_rebuild(pf, false, lock_acquired);
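/* Editorial note: the complete warm-reset flow is therefore
 * i40e_prep_for_reset() to quiesce the device, i40e_reset() to wait
 * out GRST, then i40e_rebuild(), with lock_acquired threaded through
 * so rtnl is only taken when the caller does not already hold it.
 */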
10807 * i40e_handle_mdd_event
10808 * @pf: pointer to the PF structure
10810 * Called from the MDD irq handler to identify possibly malicious VFs
10812 static void i40e_handle_mdd_event(struct i40e_pf *pf)
10814 struct i40e_hw *hw = &pf->hw;
10815 bool mdd_detected = false;
10816 struct i40e_vf *vf;
10820 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
10823 /* find what triggered the MDD event */
10824 reg = rd32(hw, I40E_GL_MDET_TX);
10825 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
10826 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
10827 I40E_GL_MDET_TX_PF_NUM_SHIFT;
10828 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
10829 I40E_GL_MDET_TX_VF_NUM_SHIFT;
10830 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
10831 I40E_GL_MDET_TX_EVENT_SHIFT;
10832 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
10833 I40E_GL_MDET_TX_QUEUE_SHIFT) -
10834 pf->hw.func_caps.base_queue;
10835 if (netif_msg_tx_err(pf))
10836 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
10837 event, queue, pf_num, vf_num);
10838 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
10839 mdd_detected = true;
10841 reg = rd32(hw, I40E_GL_MDET_RX);
10842 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
10843 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
10844 I40E_GL_MDET_RX_FUNCTION_SHIFT;
10845 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
10846 I40E_GL_MDET_RX_EVENT_SHIFT;
10847 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
10848 I40E_GL_MDET_RX_QUEUE_SHIFT) -
10849 pf->hw.func_caps.base_queue;
10850 if (netif_msg_rx_err(pf))
10851 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
10852 event, queue, func);
10853 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
10854 mdd_detected = true;
10857 if (mdd_detected) {
10858 reg = rd32(hw, I40E_PF_MDET_TX);
10859 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
10860 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
10861 dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
10863 reg = rd32(hw, I40E_PF_MDET_RX);
10864 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
10865 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
10866 dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
10870 /* see if one of the VFs needs its hand slapped */
10871 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
10873 reg = rd32(hw, I40E_VP_MDET_TX(i));
10874 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
10875 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
10876 vf->num_mdd_events++;
10877 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
10879 dev_info(&pf->pdev->dev,
10880 "Use PF Control I/F to re-enable the VF\n");
10881 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10884 reg = rd32(hw, I40E_VP_MDET_RX(i));
10885 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
10886 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
10887 vf->num_mdd_events++;
10888 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
10890 dev_info(&pf->pdev->dev,
10891 "Use PF Control I/F to re-enable the VF\n");
10892 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10896 /* re-enable mdd interrupt cause */
10897 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
10898 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
10899 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
10900 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
10905 * i40e_service_task - Run the driver's async subtasks
10906 * @work: pointer to work_struct containing our data
10908 static void i40e_service_task(struct work_struct *work)
10910 struct i40e_pf *pf = container_of(work,
10913 unsigned long start_time = jiffies;
10915 /* don't bother with service tasks if a reset is in progress */
10916 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
10917 test_bit(__I40E_SUSPENDED, pf->state))
10920 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
10923 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10924 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
10925 i40e_sync_filters_subtask(pf);
10926 i40e_reset_subtask(pf);
10927 i40e_handle_mdd_event(pf);
10928 i40e_vc_process_vflr_event(pf);
10929 i40e_watchdog_subtask(pf);
10930 i40e_fdir_reinit_subtask(pf);
10931 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
10932 /* Client subtask will reopen next time through. */
10933 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
10936 i40e_client_subtask(pf);
10937 if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
10939 i40e_notify_client_of_l2_param_changes(
10940 pf->vsi[pf->lan_vsi]);
10942 i40e_sync_filters_subtask(pf);
10944 i40e_reset_subtask(pf);
10947 i40e_clean_adminq_subtask(pf);
10949 /* flush memory to make sure state is correct before next watchdog */
10950 smp_mb__before_atomic();
10951 clear_bit(__I40E_SERVICE_SCHED, pf->state);
10953 /* If the tasks have taken longer than one timer cycle or there
10954 * is more work to be done, reschedule the service task now
10955 * rather than wait for the timer to tick again.
10957 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
10958 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
10959 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
10960 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
10961 i40e_service_event_schedule(pf);
10965 * i40e_service_timer - timer callback
10966 * @t: timer list pointer
10968 static void i40e_service_timer(struct timer_list *t)
10970 struct i40e_pf *pf = from_timer(pf, t, service_timer);
10972 mod_timer(&pf->service_timer,
10973 round_jiffies(jiffies + pf->service_timer_period));
10974 i40e_service_event_schedule(pf);
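/* Editorial note: the timer callback itself does no work; it re-arms
 * on a rounded jiffies boundary and kicks the service work item, so
 * all of the subtasks above run in process context from
 * i40e_service_task().
 */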
10978 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
10979 * @vsi: the VSI being configured
10981 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
10983 struct i40e_pf *pf = vsi->back;
10985 switch (vsi->type) {
10986 case I40E_VSI_MAIN:
10987 vsi->alloc_queue_pairs = pf->num_lan_qps;
10988 if (!vsi->num_tx_desc)
10989 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10990 I40E_REQ_DESCRIPTOR_MULTIPLE);
10991 if (!vsi->num_rx_desc)
10992 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10993 I40E_REQ_DESCRIPTOR_MULTIPLE);
10994 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10995 vsi->num_q_vectors = pf->num_lan_msix;
10997 vsi->num_q_vectors = 1;
11001 case I40E_VSI_FDIR:
11002 vsi->alloc_queue_pairs = 1;
11003 vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
11004 I40E_REQ_DESCRIPTOR_MULTIPLE);
11005 vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
11006 I40E_REQ_DESCRIPTOR_MULTIPLE);
11007 vsi->num_q_vectors = pf->num_fdsb_msix;
11010 case I40E_VSI_VMDQ2:
11011 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
11012 if (!vsi->num_tx_desc)
11013 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11014 I40E_REQ_DESCRIPTOR_MULTIPLE);
11015 if (!vsi->num_rx_desc)
11016 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11017 I40E_REQ_DESCRIPTOR_MULTIPLE);
11018 vsi->num_q_vectors = pf->num_vmdq_msix;
11021 case I40E_VSI_SRIOV:
11022 vsi->alloc_queue_pairs = pf->num_vf_qps;
11023 if (!vsi->num_tx_desc)
11024 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11025 I40E_REQ_DESCRIPTOR_MULTIPLE);
11026 if (!vsi->num_rx_desc)
11027 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11028 I40E_REQ_DESCRIPTOR_MULTIPLE);
11040 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
11041 * @vsi: VSI pointer
11042 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
11044 * On error: returns error code (negative)
11045 * On success: returns 0
11047 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
11049 struct i40e_ring **next_rings;
11053 /* allocate memory for the Tx, XDP Tx and Rx ring pointers */
11054 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
11055 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
11056 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
11057 if (!vsi->tx_rings)
11059 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
11060 if (i40e_enabled_xdp_vsi(vsi)) {
11061 vsi->xdp_rings = next_rings;
11062 next_rings += vsi->alloc_queue_pairs;
11064 vsi->rx_rings = next_rings;
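/* Editorial note: layout of the single allocation carved up above
 * (XDP-enabled case):
 *
 *	tx_rings[0 .. alloc_queue_pairs - 1]
 *	xdp_rings[0 .. alloc_queue_pairs - 1]
 *	rx_rings[0 .. alloc_queue_pairs - 1]
 *
 * Without XDP the middle block is absent, which is why the size is
 * scaled by 3 or 2 when the array is allocated.
 */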
11066 if (alloc_qvectors) {
11067 /* allocate memory for q_vector pointers */
11068 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
11069 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
11070 if (!vsi->q_vectors) {
11078 kfree(vsi->tx_rings);
11083 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
11084 * @pf: board private structure
11085 * @type: type of VSI
11087 * On error: returns error code (negative)
11088 * On success: returns vsi index in PF (positive)
11090 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
11093 struct i40e_vsi *vsi;
11097 /* Need to protect the allocation of the VSIs at the PF level */
11098 mutex_lock(&pf->switch_mutex);
11100 /* VSI list may be fragmented if VSI creation/destruction has
11101 * been happening. We can afford to do a quick scan to look
11102 * for any free VSIs in the list.
11104 * find next empty vsi slot, looping back around if necessary
11107 while (i < pf->num_alloc_vsi && pf->vsi[i])
11109 if (i >= pf->num_alloc_vsi) {
11111 while (i < pf->next_vsi && pf->vsi[i])
11115 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
11116 vsi_idx = i; /* Found one! */
11119 goto unlock_pf; /* out of VSI slots! */
11121 pf->next_vsi = ++i;
11123 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
11130 set_bit(__I40E_VSI_DOWN, vsi->state);
11132 vsi->idx = vsi_idx;
11133 vsi->int_rate_limit = 0;
11134 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
11135 pf->rss_table_size : 64;
11136 vsi->netdev_registered = false;
11137 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
11138 hash_init(vsi->mac_filter_hash);
11139 vsi->irqs_ready = false;
11141 if (type == I40E_VSI_MAIN) {
11142 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
11143 if (!vsi->af_xdp_zc_qps)
11147 ret = i40e_set_num_rings_in_vsi(vsi);
11151 ret = i40e_vsi_alloc_arrays(vsi, true);
11155 /* Setup default MSIX irq handler for VSI */
11156 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
11158 /* Initialize VSI lock */
11159 spin_lock_init(&vsi->mac_filter_hash_lock);
11160 pf->vsi[vsi_idx] = vsi;
11165 bitmap_free(vsi->af_xdp_zc_qps);
11166 pf->next_vsi = i - 1;
11169 mutex_unlock(&pf->switch_mutex);
11174 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
11175 * @vsi: VSI pointer
11176 * @free_qvectors: a bool to specify if q_vectors need to be freed.
11181 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
11183 /* free the ring and vector containers */
11184 if (free_qvectors) {
11185 kfree(vsi->q_vectors);
11186 vsi->q_vectors = NULL;
11188 kfree(vsi->tx_rings);
11189 vsi->tx_rings = NULL;
11190 vsi->rx_rings = NULL;
11191 vsi->xdp_rings = NULL;
11195 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
11197 * @vsi: Pointer to VSI structure
11199 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
11204 kfree(vsi->rss_hkey_user);
11205 vsi->rss_hkey_user = NULL;
11207 kfree(vsi->rss_lut_user);
11208 vsi->rss_lut_user = NULL;
11212 * i40e_vsi_clear - Deallocate the VSI provided
11213 * @vsi: the VSI being un-configured
11215 static int i40e_vsi_clear(struct i40e_vsi *vsi)
11217 struct i40e_pf *pf;
11226 mutex_lock(&pf->switch_mutex);
11227 if (!pf->vsi[vsi->idx]) {
11228 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
11229 vsi->idx, vsi->idx, vsi->type);
11233 if (pf->vsi[vsi->idx] != vsi) {
11234 dev_err(&pf->pdev->dev,
11235 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
11236 pf->vsi[vsi->idx]->idx,
11237 pf->vsi[vsi->idx]->type,
11238 vsi->idx, vsi->type);
11242 /* updates the PF for this cleared vsi */
11243 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
11244 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
11246 bitmap_free(vsi->af_xdp_zc_qps);
11247 i40e_vsi_free_arrays(vsi, true);
11248 i40e_clear_rss_config_user(vsi);
11250 pf->vsi[vsi->idx] = NULL;
11251 if (vsi->idx < pf->next_vsi)
11252 pf->next_vsi = vsi->idx;
11255 mutex_unlock(&pf->switch_mutex);
11263 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
11264 * @vsi: the VSI being cleaned
11266 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
11270 if (vsi->tx_rings && vsi->tx_rings[0]) {
11271 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
11272 kfree_rcu(vsi->tx_rings[i], rcu);
11273 WRITE_ONCE(vsi->tx_rings[i], NULL);
11274 WRITE_ONCE(vsi->rx_rings[i], NULL);
11275 if (vsi->xdp_rings)
11276 WRITE_ONCE(vsi->xdp_rings[i], NULL);
11282 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
11283 * @vsi: the VSI being configured
11285 static int i40e_alloc_rings(struct i40e_vsi *vsi)
11287 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
11288 struct i40e_pf *pf = vsi->back;
11289 struct i40e_ring *ring;
11291 /* Set basic values in the rings to be used later during open() */
11292 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
11293 /* allocate space for both Tx and Rx in one shot */
11294 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
11298 ring->queue_index = i;
11299 ring->reg_idx = vsi->base_queue + i;
11300 ring->ring_active = false;
11302 ring->netdev = vsi->netdev;
11303 ring->dev = &pf->pdev->dev;
11304 ring->count = vsi->num_tx_desc;
11307 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
11308 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
11309 ring->itr_setting = pf->tx_itr_default;
11310 WRITE_ONCE(vsi->tx_rings[i], ring++);
11312 if (!i40e_enabled_xdp_vsi(vsi))
11315 ring->queue_index = vsi->alloc_queue_pairs + i;
11316 ring->reg_idx = vsi->base_queue + ring->queue_index;
11317 ring->ring_active = false;
11319 ring->netdev = NULL;
11320 ring->dev = &pf->pdev->dev;
11321 ring->count = vsi->num_tx_desc;
11324 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
11325 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
11326 set_ring_xdp(ring);
11327 ring->itr_setting = pf->tx_itr_default;
11328 WRITE_ONCE(vsi->xdp_rings[i], ring++);
11331 ring->queue_index = i;
11332 ring->reg_idx = vsi->base_queue + i;
11333 ring->ring_active = false;
11335 ring->netdev = vsi->netdev;
11336 ring->dev = &pf->pdev->dev;
11337 ring->count = vsi->num_rx_desc;
11340 ring->itr_setting = pf->rx_itr_default;
11341 WRITE_ONCE(vsi->rx_rings[i], ring);
11347 i40e_vsi_clear_rings(vsi);
11352 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
11353 * @pf: board private structure
11354 * @vectors: the number of MSI-X vectors to request
11356 * Returns the number of vectors reserved, or error
11358 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
11360 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
11361 I40E_MIN_MSIX, vectors);
11363 dev_info(&pf->pdev->dev,
11364 "MSI-X vector reservation failed: %d\n", vectors);
11372 * i40e_init_msix - Setup the MSIX capability
11373 * @pf: board private structure
11375 * Work with the OS to set up the MSIX vectors needed.
11377 * Returns the number of vectors reserved or negative on failure
11379 static int i40e_init_msix(struct i40e_pf *pf)
11381 struct i40e_hw *hw = &pf->hw;
11382 int cpus, extra_vectors;
11386 int iwarp_requested = 0;
11388 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
11391 /* The number of vectors we'll request will be comprised of:
11392 * - Add 1 for "other" cause for Admin Queue events, etc.
11393 * - The number of LAN queue pairs
11394 * - Queues being used for RSS.
11395 * We don't need as many as max_rss_size vectors.
11396 * We use rss_size instead in the calculation since that
11397 * is governed by the number of CPUs in the system.
11398 * - assumes symmetric Tx/Rx pairing
11399 * - The number of VMDq pairs
11400 * - The CPU count within the NUMA node if iWARP is enabled
11401 * Once we count this up, try the request.
11403 * If we can't get what we want, we'll simplify to nearly nothing
11404 * and try again. If that still fails, we punt.
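/* Editorial worked example (illustrative numbers only): with 16
 * online CPUs and 129 usable vectors, the misc handler takes 1,
 * the LAN queues initially take min(16, 128 / 2) = 16, FD sideband
 * takes 1, iWARP and VMDq then draw from what remains, and any
 * leftovers are finally folded back into the LAN count, capped at
 * the number of online CPUs.
 */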
11406 vectors_left = hw->func_caps.num_msix_vectors;
11409 /* reserve one vector for miscellaneous handler */
11410 if (vectors_left) {
11415 /* reserve some vectors for the main PF traffic queues. Initially we
11416 * only reserve at most 50% of the available vectors, in the case that
11417 * the number of online CPUs is large. This ensures that we can enable
11418 * extra features as well. Once we've enabled the other features, we
11419 * will use any remaining vectors to reach as close as we can to the
11420 * number of online CPUs.
11422 cpus = num_online_cpus();
11423 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
11424 vectors_left -= pf->num_lan_msix;
11426 /* reserve one vector for sideband flow director */
11427 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11428 if (vectors_left) {
11429 pf->num_fdsb_msix = 1;
11433 pf->num_fdsb_msix = 0;
11437 /* can we reserve enough for iWARP? */
11438 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11439 iwarp_requested = pf->num_iwarp_msix;
11442 pf->num_iwarp_msix = 0;
11443 else if (vectors_left < pf->num_iwarp_msix)
11444 pf->num_iwarp_msix = 1;
11445 v_budget += pf->num_iwarp_msix;
11446 vectors_left -= pf->num_iwarp_msix;
11449 /* any vectors left over go for VMDq support */
11450 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
11451 if (!vectors_left) {
11452 pf->num_vmdq_msix = 0;
11453 pf->num_vmdq_qps = 0;
11455 int vmdq_vecs_wanted =
11456 pf->num_vmdq_vsis * pf->num_vmdq_qps;
11458 min_t(int, vectors_left, vmdq_vecs_wanted);
11460 /* if we're short on vectors for what's desired, we limit
11461 * the queues per vmdq. If this is still more than are
11462 * available, the user will need to change the number of
11463 * queues/vectors used by the PF later with the ethtool
11464 * channels command.
11465 */
11466 if (vectors_left < vmdq_vecs_wanted) {
11467 pf->num_vmdq_qps = 1;
11468 vmdq_vecs_wanted = pf->num_vmdq_vsis;
11469 vmdq_vecs = min_t(int,
11473 pf->num_vmdq_msix = pf->num_vmdq_qps;
11475 v_budget += vmdq_vecs;
11476 vectors_left -= vmdq_vecs;
11480 /* On systems with a large number of SMP cores, we previously limited
11481 * the number of vectors for num_lan_msix to be at most 50% of the
11482 * available vectors, to allow for other features. Now, we add back
11483 * the remaining vectors. However, we ensure that the total
11484 * num_lan_msix will not exceed num_online_cpus(). To do this, we
11485 * calculate the number of vectors we can add without going over the
11486 * cap of CPUs. For systems with a small number of CPUs this will be
11487 * nothing.
11488 */
11489 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
11490 pf->num_lan_msix += extra_vectors;
11491 vectors_left -= extra_vectors;
11493 WARN(vectors_left < 0,
11494 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
11496 v_budget += pf->num_lan_msix;
11497 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
11499 if (!pf->msix_entries)
11502 for (i = 0; i < v_budget; i++)
11503 pf->msix_entries[i].entry = i;
11504 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
11506 if (v_actual < I40E_MIN_MSIX) {
11507 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
11508 kfree(pf->msix_entries);
11509 pf->msix_entries = NULL;
11510 pci_disable_msix(pf->pdev);
11513 } else if (v_actual == I40E_MIN_MSIX) {
11514 /* Adjust for minimal MSIX use */
11515 pf->num_vmdq_vsis = 0;
11516 pf->num_vmdq_qps = 0;
11517 pf->num_lan_qps = 1;
11518 pf->num_lan_msix = 1;
11520 } else if (v_actual != v_budget) {
11521 /* If we have limited resources, we will start with no vectors
11522 * for the special features and then allocate vectors to some
11523 * of these features based on the policy and at the end disable
11524 * the features that did not get any vectors.
11528 dev_info(&pf->pdev->dev,
11529 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
11530 v_actual, v_budget);
11531 /* reserve the misc vector */
11532 vec = v_actual - 1;
11534 /* Scale vector usage down */
11535 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
11536 pf->num_vmdq_vsis = 1;
11537 pf->num_vmdq_qps = 1;
11539 /* partition out the remaining vectors */
11542 pf->num_lan_msix = 1;
11545 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11546 pf->num_lan_msix = 1;
11547 pf->num_iwarp_msix = 1;
11549 pf->num_lan_msix = 2;
11553 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11554 pf->num_iwarp_msix = min_t(int, (vec / 3),
11556 pf->num_vmdq_vsis = min_t(int, (vec / 3),
11557 I40E_DEFAULT_NUM_VMDQ_VSI);
11559 pf->num_vmdq_vsis = min_t(int, (vec / 2),
11560 I40E_DEFAULT_NUM_VMDQ_VSI);
11562 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11563 pf->num_fdsb_msix = 1;
11566 pf->num_lan_msix = min_t(int,
11567 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
11569 pf->num_lan_qps = pf->num_lan_msix;
11574 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
11575 (pf->num_fdsb_msix == 0)) {
11576 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
11577 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11578 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11580 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
11581 (pf->num_vmdq_msix == 0)) {
11582 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
11583 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
11586 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
11587 (pf->num_iwarp_msix == 0)) {
11588 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
11589 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11591 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
11592 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
11594 pf->num_vmdq_msix * pf->num_vmdq_vsis,
11596 pf->num_iwarp_msix);
11602 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
11603 * @vsi: the VSI being configured
11604 * @v_idx: index of the vector in the vsi struct
11606 * We allocate one q_vector. If allocation fails we return -ENOMEM.
11608 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
11610 struct i40e_q_vector *q_vector;
11612 /* allocate q_vector */
11613 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
11617 q_vector->vsi = vsi;
11618 q_vector->v_idx = v_idx;
11619 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
11621 if (vsi->netdev)
11622 netif_napi_add(vsi->netdev, &q_vector->napi,
11623 i40e_napi_poll, NAPI_POLL_WEIGHT);
11625 /* tie q_vector and vsi together */
11626 vsi->q_vectors[v_idx] = q_vector;
11632 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
11633 * @vsi: the VSI being configured
11635 * We allocate one q_vector per queue interrupt. If allocation fails we
11636 * return -ENOMEM.
11637 */
11638 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
11640 struct i40e_pf *pf = vsi->back;
11641 int err, v_idx, num_q_vectors;
11643 /* if not MSIX, give the one vector only to the LAN VSI */
11644 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11645 num_q_vectors = vsi->num_q_vectors;
11646 else if (vsi == pf->vsi[pf->lan_vsi])
11651 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
11652 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
11661 i40e_free_q_vector(vsi, v_idx);
11667 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
11668 * @pf: board private structure to initialize
11670 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
11675 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11676 vectors = i40e_init_msix(pf);
11678 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
11679 I40E_FLAG_IWARP_ENABLED |
11680 I40E_FLAG_RSS_ENABLED |
11681 I40E_FLAG_DCB_CAPABLE |
11682 I40E_FLAG_DCB_ENABLED |
11683 I40E_FLAG_SRIOV_ENABLED |
11684 I40E_FLAG_FD_SB_ENABLED |
11685 I40E_FLAG_FD_ATR_ENABLED |
11686 I40E_FLAG_VMDQ_ENABLED);
11687 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11689 /* rework the queue expectations without MSIX */
11690 i40e_determine_queue_usage(pf);
11694 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11695 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
11696 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
11697 vectors = pci_enable_msi(pf->pdev);
11699 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
11701 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
11703 vectors = 1; /* one MSI or Legacy vector */
11706 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
11707 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
11709 /* set up vector assignment tracking */
11710 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
11711 pf->irq_pile = kzalloc(size, GFP_KERNEL);
11715 pf->irq_pile->num_entries = vectors;
11716 pf->irq_pile->search_hint = 0;
11718 /* track first vector for misc interrupts, ignore return */
11719 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
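/* Editorial note: the fallback ladder above is MSI-X, then a single
 * MSI vector, then legacy INTx; each step down strips the capability
 * flags that cannot be serviced with fewer vectors, so callers can
 * simply inspect pf->flags afterwards.
 */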
11725 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
11726 * @pf: private board data structure
11728 * Restore the interrupt scheme that was cleared when we suspended the
11729 * device. This should be called during resume to re-allocate the q_vectors
11730 * and reacquire IRQs.
11732 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
11736 /* We cleared the MSI and MSI-X flags when disabling the old interrupt
11737 * scheme. We need to re-enable them here in order to attempt to
11738 * re-acquire the MSI or MSI-X vectors
11740 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
11742 err = i40e_init_interrupt_scheme(pf);
11746 /* Now that we've re-acquired IRQs, we need to remap the vectors and
11747 * rings together again.
11749 for (i = 0; i < pf->num_alloc_vsi; i++) {
11751 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
11754 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
11758 err = i40e_setup_misc_vector(pf);
11762 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
11763 i40e_client_update_msix_info(pf);
11770 i40e_vsi_free_q_vectors(pf->vsi[i]);
11777 * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
11778 * non-queue events in recovery mode
11779 * @pf: board private structure
11781 * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
11782 * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
11783 * This is handled differently than in the normal case since no Tx/Rx
11784 * resources are being allocated in recovery mode.
11786 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
11790 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11791 err = i40e_setup_misc_vector(pf);
11794 dev_info(&pf->pdev->dev,
11795 "MSI-X misc vector request failed, error %d\n",
11800 u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
11802 err = request_irq(pf->pdev->irq, i40e_intr, flags,
11806 dev_info(&pf->pdev->dev,
11807 "MSI/legacy misc vector request failed, error %d\n",
11811 i40e_enable_misc_int_causes(pf);
11812 i40e_irq_dynamic_enable_icr0(pf);
11819 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
11820 * @pf: board private structure
11822 * This sets up the handler for MSIX 0, which is used to manage the
11823 * non-queue interrupts, e.g. AdminQ and errors. This is not used
11824 * when in MSI or Legacy interrupt mode.
11826 static int i40e_setup_misc_vector(struct i40e_pf *pf)
11828 struct i40e_hw *hw = &pf->hw;
11831 /* Only request the IRQ once, the first time through. */
11832 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
11833 err = request_irq(pf->msix_entries[0].vector,
11834 i40e_intr, 0, pf->int_name, pf);
11836 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
11837 dev_info(&pf->pdev->dev,
11838 "request_irq for %s failed: %d\n",
11839 pf->int_name, err);
11844 i40e_enable_misc_int_causes(pf);
11846 /* associate no queues to the misc vector */
11847 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
11848 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
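/* Editorial note (assumption based on the shift above): the ITR
 * registers appear to be programmed in 2-usec units, so the 1-usec
 * I40E_ITR_8K value is halved before being written.
 */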
11852 i40e_irq_dynamic_enable_icr0(pf);
11858 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
11859 * @vsi: Pointer to vsi structure
11860 * @seed: Buffer to store the hash keys
11861 * @lut: Buffer to store the lookup table entries
11862 * @lut_size: Size of buffer to store the lookup table entries
11864 * Return 0 on success, negative on failure
11866 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
11867 u8 *lut, u16 lut_size)
11869 struct i40e_pf *pf = vsi->back;
11870 struct i40e_hw *hw = &pf->hw;
11874 ret = i40e_aq_get_rss_key(hw, vsi->id,
11875 (struct i40e_aqc_get_set_rss_key_data *)seed);
11877 dev_info(&pf->pdev->dev,
11878 "Cannot get RSS key, err %s aq_err %s\n",
11879 i40e_stat_str(&pf->hw, ret),
11880 i40e_aq_str(&pf->hw,
11881 pf->hw.aq.asq_last_status));
11887 bool pf_lut = vsi->type == I40E_VSI_MAIN;
11889 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
11891 dev_info(&pf->pdev->dev,
11892 "Cannot get RSS lut, err %s aq_err %s\n",
11893 i40e_stat_str(&pf->hw, ret),
11894 i40e_aq_str(&pf->hw,
11895 pf->hw.aq.asq_last_status));
11904 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
11905 * @vsi: Pointer to vsi structure
11906 * @seed: RSS hash seed
11907 * @lut: Lookup table
11908 * @lut_size: Lookup table size
11910 * Returns 0 on success, negative on failure
11912 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
11913 const u8 *lut, u16 lut_size)
11915 struct i40e_pf *pf = vsi->back;
11916 struct i40e_hw *hw = &pf->hw;
11917 u16 vf_id = vsi->vf_id;
11920 /* Fill out hash function seed */
11922 u32 *seed_dw = (u32 *)seed;
11924 if (vsi->type == I40E_VSI_MAIN) {
11925 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
11926 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
11927 } else if (vsi->type == I40E_VSI_SRIOV) {
11928 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
11929 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
11931 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
11936 u32 *lut_dw = (u32 *)lut;
11938 if (vsi->type == I40E_VSI_MAIN) {
11939 if (lut_size != I40E_HLUT_ARRAY_SIZE)
11941 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11942 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
11943 } else if (vsi->type == I40E_VSI_SRIOV) {
11944 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
11946 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
11947 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
11949 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
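/* Editorial note: the seed and LUT are written one 32-bit word at a
 * time; with the usual 52-byte hash key that is 13 HKEY registers,
 * and each HLUT register packs four 8-bit queue indices. These
 * counts are illustrative; the authoritative sizes are the
 * *_MAX_INDEX and *_ARRAY_SIZE macros used above.
 */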
11958 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
11959 * @vsi: Pointer to VSI structure
11960 * @seed: Buffer to store the keys
11961 * @lut: Buffer to store the lookup table entries
11962 * @lut_size: Size of buffer to store the lookup table entries
11964 * Returns 0 on success, negative on failure
11966 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
11967 u8 *lut, u16 lut_size)
11969 struct i40e_pf *pf = vsi->back;
11970 struct i40e_hw *hw = &pf->hw;
11974 u32 *seed_dw = (u32 *)seed;
11976 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
11977 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
11980 u32 *lut_dw = (u32 *)lut;
11982 if (lut_size != I40E_HLUT_ARRAY_SIZE)
11984 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11985 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
11992 * i40e_config_rss - Configure RSS keys and lut
11993 * @vsi: Pointer to VSI structure
11994 * @seed: RSS hash seed
11995 * @lut: Lookup table
11996 * @lut_size: Lookup table size
11998 * Returns 0 on success, negative on failure
12000 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
12002 struct i40e_pf *pf = vsi->back;
12004 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
12005 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
12007 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
12011 * i40e_get_rss - Get RSS keys and lut
12012 * @vsi: Pointer to VSI structure
12013 * @seed: Buffer to store the keys
12014 * @lut: Buffer to store the lookup table entries
12015 * @lut_size: Size of buffer to store the lookup table entries
12017 * Returns 0 on success, negative on failure
12019 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
12021 struct i40e_pf *pf = vsi->back;
12023 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
12024 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
12026 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
12030 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
12031 * @pf: Pointer to board private structure
12032 * @lut: Lookup table
12033 * @rss_table_size: Lookup table size
12034 * @rss_size: Range of queue number for hashing
12036 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
12037 u16 rss_table_size, u16 rss_size)
12041 for (i = 0; i < rss_table_size; i++)
12042 lut[i] = i % rss_size;
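/* Illustrative usage sketch (hypothetical local variables): with a
 * 512-entry table and an rss_size of 8, the LUT becomes the
 * repeating pattern 0, 1, ..., 7, spreading flows evenly over the
 * eight queues:
 *
 *	u8 seed[I40E_HKEY_ARRAY_SIZE];
 *	u8 *lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
 *
 *	if (!lut)
 *		return -ENOMEM;
 *	i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, 8);
 *	netdev_rss_key_fill(seed, I40E_HKEY_ARRAY_SIZE);
 *	i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
 *	kfree(lut);
 */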
12046 * i40e_pf_config_rss - Prepare for RSS if used
12047 * @pf: board private structure
12049 static int i40e_pf_config_rss(struct i40e_pf *pf)
12051 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12052 u8 seed[I40E_HKEY_ARRAY_SIZE];
12054 struct i40e_hw *hw = &pf->hw;
12059 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
12060 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
12061 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
12062 hena |= i40e_pf_get_default_rss_hena(pf);
12064 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
12065 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
12067 /* Determine the RSS table size based on the hardware capabilities */
12068 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
12069 reg_val = (pf->rss_table_size == 512) ?
12070 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
12071 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
12072 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
12074 /* Determine the RSS size of the VSI */
12075 if (!vsi->rss_size) {
12077 /* If the firmware does something weird during VSI init, we
12078 * could end up with zero TCs. Check for that to avoid
12079 * divide-by-zero. It probably won't pass traffic, but it also
12080 * won't panic.
12081 */
12082 qcount = vsi->num_queue_pairs /
12083 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
12084 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
12086 if (!vsi->rss_size)
12089 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
12093 /* Use user configured lut if there is one, otherwise use default */
12094 if (vsi->rss_lut_user)
12095 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
12097 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
12099 /* Use user configured hash key if there is one, otherwise
12100 * fill with a random key
12101 */
12102 if (vsi->rss_hkey_user)
12103 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
12105 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
12106 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
12107 kfree(lut);
12108
12109 return ret;
12113 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
12114 * @pf: board private structure
12115 * @queue_count: the requested queue count for rss.
12117 * returns 0 if rss is not enabled; if enabled, returns the final rss queue
12118 * count, which may be different from the requested queue count.
12119 * Note: expects to be called while under rtnl_lock()
12121 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
12123 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12126 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
12129 queue_count = min_t(int, queue_count, num_online_cpus());
12130 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
12132 if (queue_count != vsi->num_queue_pairs) {
12135 vsi->req_queue_pairs = queue_count;
12136 i40e_prep_for_reset(pf);
12138 pf->alloc_rss_size = new_rss_size;
12140 i40e_reset_and_rebuild(pf, true, true);
12142 /* Discard the user configured hash keys and lut, if fewer
12143 * queues are enabled.
12144 */
12145 if (queue_count < vsi->rss_size) {
12146 i40e_clear_rss_config_user(vsi);
12147 dev_dbg(&pf->pdev->dev,
12148 "discard user configured hash keys and lut\n");
12151 /* Reset vsi->rss_size, as number of enabled queues changed */
12152 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
12153 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
12155 i40e_pf_config_rss(pf);
12157 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
12158 vsi->req_queue_pairs, pf->rss_size_max);
12159 return pf->alloc_rss_size;
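/* Editorial note: this is the path behind run-time channel changes
 * (e.g. ethtool -L, which holds rtnl as required above); the device
 * is prepped, reset and rebuilt with the new alloc_rss_size before
 * RSS is reprogrammed via i40e_pf_config_rss().
 */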
12163 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
12164 * @pf: board private structure
12166 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
12168 i40e_status status;
12169 bool min_valid, max_valid;
12170 u32 max_bw, min_bw;
12172 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
12173 &min_valid, &max_valid);
12177 pf->min_bw = min_bw;
12179 pf->max_bw = max_bw;
12186 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
12187 * @pf: board private structure
12189 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
12191 struct i40e_aqc_configure_partition_bw_data bw_data;
12192 i40e_status status;
12194 /* Set the valid bit for this PF */
12195 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
12196 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
12197 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
12199 /* Set the new bandwidths */
12200 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
12206 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
12207 * @pf: board private structure
12209 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
12211 /* Commit temporary BW setting to permanent NVM image */
12212 enum i40e_admin_queue_err last_aq_status;
12216 if (pf->hw.partition_id != 1) {
12217 dev_info(&pf->pdev->dev,
12218 "Commit BW only works on partition 1! This is partition %d",
12219 pf->hw.partition_id);
12220 ret = I40E_NOT_SUPPORTED;
12221 goto bw_commit_out;
12224 /* Acquire NVM for read access */
12225 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
12226 last_aq_status = pf->hw.aq.asq_last_status;
12228 dev_info(&pf->pdev->dev,
12229 "Cannot acquire NVM for read access, err %s aq_err %s\n",
12230 i40e_stat_str(&pf->hw, ret),
12231 i40e_aq_str(&pf->hw, last_aq_status));
12232 goto bw_commit_out;
12235 /* Read word 0x10 of NVM - SW compatibility word 1 */
12236 ret = i40e_aq_read_nvm(&pf->hw,
12237 I40E_SR_NVM_CONTROL_WORD,
12238 0x10, sizeof(nvm_word), &nvm_word,
12240 /* Save off last admin queue command status before releasing
12241 * the resource
12242 */
12243 last_aq_status = pf->hw.aq.asq_last_status;
12244 i40e_release_nvm(&pf->hw);
12246 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
12247 i40e_stat_str(&pf->hw, ret),
12248 i40e_aq_str(&pf->hw, last_aq_status));
12249 goto bw_commit_out;
12252 /* Wait a bit for NVM release to complete */
12253 msleep(50);
12255 /* Acquire NVM for write access */
12256 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
12257 last_aq_status = pf->hw.aq.asq_last_status;
12259 dev_info(&pf->pdev->dev,
12260 "Cannot acquire NVM for write access, err %s aq_err %s\n",
12261 i40e_stat_str(&pf->hw, ret),
12262 i40e_aq_str(&pf->hw, last_aq_status));
12263 goto bw_commit_out;
12265 /* Write it back out unchanged to initiate update NVM,
12266 * which will force a write of the shadow (alt) RAM to
12267 * the NVM - thus storing the bandwidth values permanently.
12269 ret = i40e_aq_update_nvm(&pf->hw,
12270 I40E_SR_NVM_CONTROL_WORD,
12271 0x10, sizeof(nvm_word),
12272 &nvm_word, true, 0, NULL);
12273 /* Save off last admin queue command status before releasing
12274 * the resource
12275 */
12276 last_aq_status = pf->hw.aq.asq_last_status;
12277 i40e_release_nvm(&pf->hw);
12279 dev_info(&pf->pdev->dev,
12280 "BW settings NOT SAVED, err %s aq_err %s\n",
12281 i40e_stat_str(&pf->hw, ret),
12282 i40e_aq_str(&pf->hw, last_aq_status));
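/* Editorial summary of the commit sequence above:
 *  1. acquire the NVM for read and fetch SW compatibility word 0x10
 *  2. release, wait briefly, then re-acquire the NVM for write
 *  3. write the word back unchanged, forcing the shadow (alt) RAM,
 *     and with it the BW values, into the NVM image
 * asq_last_status is snapshotted before each release because the
 * release itself goes through the admin queue and would overwrite it.
 */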
12289 * i40e_is_total_port_shutdown_enabled - read NVM and return value
12290 * if total port shutdown feature is enabled for this PF
12291 * @pf: board private structure
12293 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
12295 #define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4)
12296 #define I40E_FEATURES_ENABLE_PTR 0x2A
12297 #define I40E_CURRENT_SETTING_PTR 0x2B
12298 #define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D
12299 #define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
12300 #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
12301 #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
12302 i40e_status read_status = I40E_SUCCESS;
12303 u16 sr_emp_sr_settings_ptr = 0;
12304 u16 features_enable = 0;
12305 u16 link_behavior = 0;
12308 read_status = i40e_read_nvm_word(&pf->hw,
12309 I40E_SR_EMP_SR_SETTINGS_PTR,
12310 &sr_emp_sr_settings_ptr);
12313 read_status = i40e_read_nvm_word(&pf->hw,
12314 sr_emp_sr_settings_ptr +
12315 I40E_FEATURES_ENABLE_PTR,
12319 if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
12320 read_status = i40e_read_nvm_module_data(&pf->hw,
12321 I40E_SR_EMP_SR_SETTINGS_PTR,
12322 I40E_CURRENT_SETTING_PTR,
12323 I40E_LINK_BEHAVIOR_WORD_OFFSET,
12324 I40E_LINK_BEHAVIOR_WORD_LENGTH,
12328 link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
12329 ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
12334 dev_warn(&pf->pdev->dev,
12335 "total-port-shutdown feature is off due to read nvm error: %s\n",
12336 i40e_stat_str(&pf->hw, read_status));
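/* Editorial note: the link-behavior word packs one 4-bit field per
 * port (I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH), so the shift above
 * moves this port's nibble into place before bit 0, the "OS forced"
 * link-behavior enable, is tested.
 */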
12341 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
12342 * @pf: board private structure to initialize
12344 * i40e_sw_init initializes the Adapter private data structure.
12345 * Fields are initialized based on PCI device information and
12346 * OS network device settings (MTU size).
12348 static int i40e_sw_init(struct i40e_pf *pf)
12353 /* Set default capability flags */
12354 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
12355 I40E_FLAG_MSI_ENABLED |
12356 I40E_FLAG_MSIX_ENABLED;
12358 /* Set default ITR */
12359 pf->rx_itr_default = I40E_ITR_RX_DEF;
12360 pf->tx_itr_default = I40E_ITR_TX_DEF;
12362 /* Depending on PF configurations, it is possible that the RSS
12363 * maximum might end up larger than the available queues
12365 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
12366 pf->alloc_rss_size = 1;
12367 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
12368 pf->rss_size_max = min_t(int, pf->rss_size_max,
12369 pf->hw.func_caps.num_tx_qp);
12370 if (pf->hw.func_caps.rss) {
12371 pf->flags |= I40E_FLAG_RSS_ENABLED;
12372 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
12373 num_online_cpus());
12376 /* MFP mode enabled */
12377 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
12378 pf->flags |= I40E_FLAG_MFP_ENABLED;
12379 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
12380 if (i40e_get_partition_bw_setting(pf)) {
12381 dev_warn(&pf->pdev->dev,
12382 "Could not get partition bw settings\n");
12384 dev_info(&pf->pdev->dev,
12385 "Partition BW Min = %8.8x, Max = %8.8x\n",
12386 pf->min_bw, pf->max_bw);
12388 /* nudge the Tx scheduler */
12389 i40e_set_partition_bw_setting(pf);
12393 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
12394 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
12395 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
12396 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
12397 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
12398 pf->hw.num_partitions > 1)
12399 dev_info(&pf->pdev->dev,
12400 "Flow Director Sideband mode Disabled in MFP mode\n");
12402 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12403 pf->fdir_pf_filter_count =
12404 pf->hw.func_caps.fd_filters_guaranteed;
12405 pf->hw.fdir_shared_filter_count =
12406 pf->hw.func_caps.fd_filters_best_effort;
12409 if (pf->hw.mac.type == I40E_MAC_X722) {
12410 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
12411 I40E_HW_128_QP_RSS_CAPABLE |
12412 I40E_HW_ATR_EVICT_CAPABLE |
12413 I40E_HW_WB_ON_ITR_CAPABLE |
12414 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
12415 I40E_HW_NO_PCI_LINK_CHECK |
12416 I40E_HW_USE_SET_LLDP_MIB |
12417 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
12418 I40E_HW_PTP_L4_CAPABLE |
12419 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
12420 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
12422 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
12423 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
12424 I40E_FDEVICT_PCTYPE_DEFAULT) {
12425 dev_warn(&pf->pdev->dev,
12426 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
12427 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
12429 } else if ((pf->hw.aq.api_maj_ver > 1) ||
12430 ((pf->hw.aq.api_maj_ver == 1) &&
12431 (pf->hw.aq.api_min_ver > 4))) {
12432 /* Supported in FW API version higher than 1.4 */
12433 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
12436 /* Enable HW ATR eviction if possible */
12437 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
12438 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
12440 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12441 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
12442 (pf->hw.aq.fw_maj_ver < 4))) {
12443 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
12444 /* No DCB support for FW < v4.33 */
12445 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
12448 /* Disable FW LLDP if FW < v4.3 */
12449 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12450 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
12451 (pf->hw.aq.fw_maj_ver < 4)))
12452 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
12454 /* Use the FW Set LLDP MIB API if FW > v4.40 */
12455 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
12456 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
12457 (pf->hw.aq.fw_maj_ver >= 5)))
12458 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
12460 /* Enable PTP L4 if FW > v6.0 */
12461 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12462 pf->hw.aq.fw_maj_ver >= 6)
12463 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
12465 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
12466 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
12467 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
12468 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
12471 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
12472 pf->flags |= I40E_FLAG_IWARP_ENABLED;
12473 /* IWARP needs one extra vector for CQP just like MISC.*/
12474 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
12476 /* Stopping FW LLDP engine is supported on XL710 and X722
12477 * starting from FW versions determined in i40e_init_adminq.
12478 * Stopping the FW LLDP engine is not supported on XL710
12479 * if NPAR is functioning so unset this hw flag in this case.
12481 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12482 pf->hw.func_caps.npar_enable &&
12483 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
12484 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
12486 #ifdef CONFIG_PCI_IOV
12487 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
12488 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
12489 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
12490 pf->num_req_vfs = min_t(int,
12491 pf->hw.func_caps.num_vfs,
12492 I40E_MAX_VF_COUNT);
12494 #endif /* CONFIG_PCI_IOV */
12495 pf->eeprom_version = 0xDEAD;
12496 pf->lan_veb = I40E_NO_VEB;
12497 pf->lan_vsi = I40E_NO_VSI;
12499 /* By default FW has this off for performance reasons */
12500 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
12502 /* set up queue assignment tracking */
12503 size = sizeof(struct i40e_lump_tracking)
12504 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
12505 pf->qp_pile = kzalloc(size, GFP_KERNEL);
12506 if (!pf->qp_pile) {
12510 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
12511 pf->qp_pile->search_hint = 0;
12513 pf->tx_timeout_recovery_level = 1;
12515 if (pf->hw.mac.type != I40E_MAC_X722 &&
12516 i40e_is_total_port_shutdown_enabled(pf)) {
12517 /* Link down on close must be on when total port shutdown
12518 * is enabled for a given port
12520 pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
12521 I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
12522 dev_info(&pf->pdev->dev,
12523 "total-port-shutdown was enabled, link-down-on-close is forced on\n");
12525 mutex_init(&pf->switch_mutex);
12532 * i40e_set_ntuple - set the ntuple feature flag and take action
12533 * @pf: board private structure to initialize
12534 * @features: the feature set that the stack is suggesting
12536 * returns a bool to indicate if reset needs to happen
12538 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
12540 bool need_reset = false;
12542 /* Check if Flow Director n-tuple support was enabled or disabled. If
12543 * the state changed, we need to reset.
12545 if (features & NETIF_F_NTUPLE) {
12546 /* Enable filters and mark for reset */
12547 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
12549 /* enable FD_SB only if there is an MSI-X vector and no cloud
12550 * filters exist
12551 */
12552 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12553 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12554 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
12557 /* turn off filters, mark for reset and clear SW filter list */
12558 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
12560 i40e_fdir_filter_exit(pf);
12562 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
12563 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12564 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
12566 /* reset fd counters */
12567 pf->fd_add_err = 0;
12568 pf->fd_atr_cnt = 0;
12569 /* if ATR was auto disabled it can be re-enabled. */
12570 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12571 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
12572 (I40E_DEBUG_FD & pf->hw.debug_mask))
12573 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
12579 * i40e_clear_rss_lut - clear the rx hash lookup table
12580 * @vsi: the VSI being configured
12582 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12584 struct i40e_pf *pf = vsi->back;
12585 struct i40e_hw *hw = &pf->hw;
12586 u16 vf_id = vsi->vf_id;
12589 if (vsi->type == I40E_VSI_MAIN) {
12590 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12591 wr32(hw, I40E_PFQF_HLUT(i), 0);
12592 } else if (vsi->type == I40E_VSI_SRIOV) {
12593 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12594 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
12596 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12601 * i40e_set_features - set the netdev feature flags
12602 * @netdev: ptr to the netdev being adjusted
12603 * @features: the feature set that the stack is suggesting
12604 * Note: expects to be called while under rtnl_lock()
12606 static int i40e_set_features(struct net_device *netdev,
12607 netdev_features_t features)
12609 struct i40e_netdev_priv *np = netdev_priv(netdev);
12610 struct i40e_vsi *vsi = np->vsi;
12611 struct i40e_pf *pf = vsi->back;
12614 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12615 i40e_pf_config_rss(pf);
12616 else if (!(features & NETIF_F_RXHASH) &&
12617 netdev->features & NETIF_F_RXHASH)
12618 i40e_clear_rss_lut(vsi);
12620 if (features & NETIF_F_HW_VLAN_CTAG_RX)
12621 i40e_vlan_stripping_enable(vsi);
12623 i40e_vlan_stripping_disable(vsi);
12625 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
12626 dev_err(&pf->pdev->dev,
12627 "Offloaded tc filters active, can't turn hw_tc_offload off");
12631 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12632 i40e_del_all_macvlans(vsi);
12634 need_reset = i40e_set_ntuple(pf, features);
12636 if (need_reset)
12637 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12642 static int i40e_udp_tunnel_set_port(struct net_device *netdev,
12643 unsigned int table, unsigned int idx,
12644 struct udp_tunnel_info *ti)
12646 struct i40e_netdev_priv *np = netdev_priv(netdev);
12647 struct i40e_hw *hw = &np->vsi->back->hw;
12648 u8 type, filter_index;
12651 type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
12652 I40E_AQC_TUNNEL_TYPE_NGE;
12654 ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
12657 netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
12658 i40e_stat_str(hw, ret),
12659 i40e_aq_str(hw, hw->aq.asq_last_status));
12663 udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
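/* Editorial note: these two functions serve as the udp_tunnel_nic
 * add/del port callbacks; the firmware filter index returned by the
 * AQ add is stashed with udp_tunnel_nic_set_port_priv() so the
 * delete path below can hand it back through ti->hw_priv.
 */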
12667 static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
12668 unsigned int table, unsigned int idx,
12669 struct udp_tunnel_info *ti)
12671 struct i40e_netdev_priv *np = netdev_priv(netdev);
12672 struct i40e_hw *hw = &np->vsi->back->hw;
12675 ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
12677 netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
12678 i40e_stat_str(hw, ret),
12679 i40e_aq_str(hw, hw->aq.asq_last_status));
12686 static int i40e_get_phys_port_id(struct net_device *netdev,
12687 struct netdev_phys_item_id *ppid)
12689 struct i40e_netdev_priv *np = netdev_priv(netdev);
12690 struct i40e_pf *pf = np->vsi->back;
12691 struct i40e_hw *hw = &pf->hw;
12693 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
12694 return -EOPNOTSUPP;
12696 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
12697 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
12703 * i40e_ndo_fdb_add - add an entry to the hardware database
12704 * @ndm: the input from the stack
12705 * @tb: pointer to array of nladdr (unused)
12706 * @dev: the net device pointer
12707 * @addr: the MAC address entry being added
12709 * @flags: instructions from stack about fdb operation
12710 * @extack: netlink extended ack, unused currently
12712 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
12713 struct net_device *dev,
12714 const unsigned char *addr, u16 vid,
12715 u16 flags,
12716 struct netlink_ext_ack *extack)
12718 struct i40e_netdev_priv *np = netdev_priv(dev);
12719 struct i40e_pf *pf = np->vsi->back;
12720 int err = 0;
12722 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
12723 return -EOPNOTSUPP;
12725 if (vid) {
12726 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
12727 return -EINVAL;
12728 }
12730 /* Hardware does not support aging addresses, so if an
12731 * ndm_state is given only allow permanent addresses
12733 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
12734 netdev_info(dev, "FDB only supports static addresses\n");
12735 return -EINVAL;
12736 }
12738 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
12739 err = dev_uc_add_excl(dev, addr);
12740 else if (is_multicast_ether_addr(addr))
12741 err = dev_mc_add_excl(dev, addr);
12742 else
12743 err = -EINVAL;
12745 /* Only return duplicate errors if NLM_F_EXCL is set */
12746 if (err == -EEXIST && !(flags & NLM_F_EXCL))
12747 err = 0;
12749 return err;
12753 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
12754 * @dev: the netdev being configured
12755 * @nlh: RTNL message
12756 * @flags: bridge flags
12757 * @extack: netlink extended ack
12759 * Inserts a new hardware bridge if not already created and
12760 * enables the bridging mode requested (VEB or VEPA). If the
12761 * hardware bridge has already been inserted and the request
12762 * is to change the mode then that requires a PF reset to
12763 * allow rebuild of the components with required hardware
12764 * bridge mode enabled.
12766 * Note: expects to be called while under rtnl_lock()
12768 static int i40e_ndo_bridge_setlink(struct net_device *dev,
12769 struct nlmsghdr *nlh,
12771 struct netlink_ext_ack *extack)
12773 struct i40e_netdev_priv *np = netdev_priv(dev);
12774 struct i40e_vsi *vsi = np->vsi;
12775 struct i40e_pf *pf = vsi->back;
12776 struct i40e_veb *veb = NULL;
12777 struct nlattr *attr, *br_spec;
12778 int i, rem;
12780 /* Only for PF VSI for now */
12781 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12782 return -EOPNOTSUPP;
12784 /* Find the HW bridge for PF VSI */
12785 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12786 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12787 veb = pf->veb[i];
12788 }
12790 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12792 nla_for_each_nested(attr, br_spec, rem) {
12793 u16 mode;
12795 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12796 continue;
12798 mode = nla_get_u16(attr);
12799 if ((mode != BRIDGE_MODE_VEPA) &&
12800 (mode != BRIDGE_MODE_VEB))
12801 return -EINVAL;
12803 /* Insert a new HW bridge */
12804 if (!veb) {
12805 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12806 vsi->tc_config.enabled_tc);
12807 if (veb) {
12808 veb->bridge_mode = mode;
12809 i40e_config_bridge_mode(veb);
12810 } else {
12811 /* No Bridge HW offload available */
12812 return -ENOENT;
12813 }
12814 break;
12815 } else if (mode != veb->bridge_mode) {
12816 /* Existing HW bridge but different mode needs reset */
12817 veb->bridge_mode = mode;
12818 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
12819 if (mode == BRIDGE_MODE_VEB)
12820 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
12821 else
12822 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12823 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
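/* Illustrative only: the VEB/VEPA switch above is typically driven
 * from userspace through the iproute2 bridge tool, e.g.
 *
 *   bridge link set dev eth0 hwmode vepa
 *
 * which reaches this handler via ndo_bridge_setlink. "eth0" is a
 * placeholder interface name.
 */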
12832 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
12835 * @seq: RTNL message seq #
12836 * @dev: the netdev being configured
12837 * @filter_mask: unused
12838 * @nlflags: netlink flags passed in
12840 * Return the mode in which the hardware bridge is operating
12843 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12844 struct net_device *dev,
12845 u32 __always_unused filter_mask,
12848 struct i40e_netdev_priv *np = netdev_priv(dev);
12849 struct i40e_vsi *vsi = np->vsi;
12850 struct i40e_pf *pf = vsi->back;
12851 struct i40e_veb *veb = NULL;
12852 int i;
12854 /* Only for PF VSI for now */
12855 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12856 return -EOPNOTSUPP;
12858 /* Find the HW bridge for the PF VSI */
12859 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12860 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12861 veb = pf->veb[i];
12862 }
12864 if (!veb)
12865 return 0;
12867 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
12868 0, 0, nlflags, filter_mask, NULL);
12872 * i40e_features_check - Validate encapsulated packet conforms to limits
12874 * @dev: This physical port's netdev
12875 * @features: Offload features that the stack believes apply
12877 static netdev_features_t i40e_features_check(struct sk_buff *skb,
12878 struct net_device *dev,
12879 netdev_features_t features)
12881 size_t len;
12883 /* No point in doing any of this if neither checksum nor GSO are
12884 * being requested for this frame. We can rule out both by just
12885 * checking for CHECKSUM_PARTIAL
12886 */
12887 if (skb->ip_summed != CHECKSUM_PARTIAL)
12888 return features;
12890 /* We cannot support GSO if the MSS is going to be less than
12891 * 64 bytes. If it is then we need to drop support for GSO.
12893 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
12894 features &= ~NETIF_F_GSO_MASK;
12896 /* MACLEN can support at most 63 words */
12897 len = skb_network_header(skb) - skb->data;
12898 if (len & ~(63 * 2))
12899 goto out_err;
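/* Note: "len & ~(63 * 2)" is a compact range check; it is non-zero
 * whenever len is odd or larger than 126 bytes, i.e. whenever the MAC
 * header cannot be described as at most 63 two-byte words. The
 * IPLEN/EIPLEN/L4TUNLEN checks below use the same trick with their
 * respective word sizes.
 */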
12901 /* IPLEN and EIPLEN can support at most 127 dwords */
12902 len = skb_transport_header(skb) - skb_network_header(skb);
12903 if (len & ~(127 * 4))
12904 goto out_err;
12906 if (skb->encapsulation) {
12907 /* L4TUNLEN can support 127 words */
12908 len = skb_inner_network_header(skb) - skb_transport_header(skb);
12909 if (len & ~(127 * 2))
12910 goto out_err;
12912 /* IPLEN can support at most 127 dwords */
12913 len = skb_inner_transport_header(skb) -
12914 skb_inner_network_header(skb);
12915 if (len & ~(127 * 4))
12916 goto out_err;
12919 /* No need to validate L4LEN as TCP is the only protocol with a
12920 * flexible value, and we support all possible values supported
12921 * by TCP, which is at most 15 dwords
12922 */
12924 return features;
12925 out_err:
12926 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
12930 * i40e_xdp_setup - add/remove an XDP program
12931 * @vsi: VSI to changed
12932 * @prog: XDP program
12933 * @extack: netlink extended ack
12935 static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
12936 struct netlink_ext_ack *extack)
12938 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
12939 struct i40e_pf *pf = vsi->back;
12940 struct bpf_prog *old_prog;
12941 bool need_reset;
12942 int i;
12944 /* Don't allow frames that span over multiple buffers */
12945 if (frame_size > vsi->rx_buf_len) {
12946 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
12947 return -EINVAL;
12948 }
12950 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
12951 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
12953 if (need_reset)
12954 i40e_prep_for_reset(pf);
12956 old_prog = xchg(&vsi->xdp_prog, prog);
12958 if (need_reset) {
12959 if (!prog)
12960 /* Wait until ndo_xsk_wakeup completes. */
12961 synchronize_rcu();
12962 i40e_reset_and_rebuild(pf, true, true);
12963 }
12965 for (i = 0; i < vsi->num_queue_pairs; i++)
12966 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
12968 if (old_prog)
12969 bpf_prog_put(old_prog);
12971 /* Kick start the NAPI context if there is an AF_XDP socket open
12972 * on that queue id. This is so that receiving will start.
12974 if (need_reset && prog)
12975 for (i = 0; i < vsi->num_queue_pairs; i++)
12976 if (vsi->xdp_rings[i]->xsk_pool)
12977 (void)i40e_xsk_wakeup(vsi->netdev, i,
12978 XDP_WAKEUP_RX);
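/* Illustrative only: an XDP program is normally attached to this path
 * through ndo_bpf (i40e_xdp below), e.g. from userspace with iproute2:
 *
 *   ip link set dev eth0 xdp obj xdp_prog.o sec xdp
 *
 * "eth0" and "xdp_prog.o" are placeholders.
 */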
12984 * i40e_enter_busy_conf - Enters busy config state
12987 * Returns 0 on success, <0 for failure.
12989 static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
12991 struct i40e_pf *pf = vsi->back;
12992 int timeout = 50;
12994 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
12995 timeout--;
12996 if (!timeout)
12997 return -EBUSY;
12998 usleep_range(1000, 2000);
13005 * i40e_exit_busy_conf - Exits busy config state
13008 static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
13010 struct i40e_pf *pf = vsi->back;
13012 clear_bit(__I40E_CONFIG_BUSY, pf->state);
13016 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
13018 * @queue_pair: queue pair
13020 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
13022 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
13023 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
13024 memset(&vsi->tx_rings[queue_pair]->stats, 0,
13025 sizeof(vsi->tx_rings[queue_pair]->stats));
13026 if (i40e_enabled_xdp_vsi(vsi)) {
13027 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
13028 sizeof(vsi->xdp_rings[queue_pair]->stats));
13033 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
13035 * @queue_pair: queue pair
13037 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
13039 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
13040 if (i40e_enabled_xdp_vsi(vsi)) {
13041 /* Make sure that in-progress ndo_xdp_xmit calls are
13042 * completed.
13043 */
13044 synchronize_rcu();
13045 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
13047 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
13051 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
13053 * @queue_pair: queue pair
13054 * @enable: true for enable, false for disable
13056 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
13059 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13060 struct i40e_q_vector *q_vector = rxr->q_vector;
13065 /* All rings in a qp belong to the same qvector. */
13066 if (q_vector->rx.ring || q_vector->tx.ring) {
13067 if (enable)
13068 napi_enable(&q_vector->napi);
13069 else
13070 napi_disable(&q_vector->napi);
13075 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
13077 * @queue_pair: queue pair
13078 * @enable: true for enable, false for disable
13080 * Returns 0 on success, <0 on failure.
13082 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
13085 struct i40e_pf *pf = vsi->back;
13086 int pf_q, ret = 0;
13088 pf_q = vsi->base_queue + queue_pair;
13089 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
13090 false /*is xdp*/, enable);
13091 if (ret) {
13092 dev_info(&pf->pdev->dev,
13093 "VSI seid %d Tx ring %d %sable timeout\n",
13094 vsi->seid, pf_q, (enable ? "en" : "dis"));
13095 return ret;
13096 }
13098 i40e_control_rx_q(pf, pf_q, enable);
13099 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
13100 if (ret) {
13101 dev_info(&pf->pdev->dev,
13102 "VSI seid %d Rx ring %d %sable timeout\n",
13103 vsi->seid, pf_q, (enable ? "en" : "dis"));
13104 return ret;
13105 }
13107 /* Due to HW errata, on Rx disable only, the register can
13108 * indicate done before it really is. Needs 50ms to be sure
13109 */
13110 if (!enable)
13111 mdelay(50);
13113 if (!i40e_enabled_xdp_vsi(vsi))
13114 return ret;
13116 ret = i40e_control_wait_tx_q(vsi->seid, pf,
13117 pf_q + vsi->alloc_queue_pairs,
13118 true /*is xdp*/, enable);
13119 if (ret)
13120 dev_info(&pf->pdev->dev,
13121 "VSI seid %d XDP Tx ring %d %sable timeout\n",
13122 vsi->seid, pf_q, (enable ? "en" : "dis"));
13124 return ret;
13129 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
13131 * @queue_pair: queue_pair
13133 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
13135 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13136 struct i40e_pf *pf = vsi->back;
13137 struct i40e_hw *hw = &pf->hw;
13139 /* All rings in a qp belong to the same qvector. */
13140 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
13141 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
13142 else
13143 i40e_irq_dynamic_enable_icr0(pf);
13149 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
13151 * @queue_pair: queue_pair
13153 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
13155 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13156 struct i40e_pf *pf = vsi->back;
13157 struct i40e_hw *hw = &pf->hw;
13159 /* For simplicity, instead of removing the qp interrupt causes
13160 * from the interrupt linked list, we simply disable the interrupt, and
13161 * leave the list intact.
13163 * All rings in a qp belong to the same qvector.
13165 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
13166 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
13168 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
13170 synchronize_irq(pf->msix_entries[intpf].vector);
13171 } else {
13172 /* Legacy and MSI mode - this stops all interrupt handling */
13173 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
13174 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
13176 synchronize_irq(pf->pdev->irq);
13181 * i40e_queue_pair_disable - Disables a queue pair
13183 * @queue_pair: queue pair
13185 * Returns 0 on success, <0 on failure.
13187 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
13189 int err;
13191 err = i40e_enter_busy_conf(vsi);
13192 if (err)
13193 return err;
13195 i40e_queue_pair_disable_irq(vsi, queue_pair);
13196 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
13197 i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
13198 i40e_queue_pair_clean_rings(vsi, queue_pair);
13199 i40e_queue_pair_reset_stats(vsi, queue_pair);
13201 return err;
13205 * i40e_queue_pair_enable - Enables a queue pair
13207 * @queue_pair: queue pair
13209 * Returns 0 on success, <0 on failure.
13211 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
13213 int err;
13215 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
13216 if (err)
13217 return err;
13219 if (i40e_enabled_xdp_vsi(vsi)) {
13220 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
13221 if (err)
13222 return err;
13223 }
13225 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
13226 if (err)
13227 return err;
13229 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
13230 i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
13231 i40e_queue_pair_enable_irq(vsi, queue_pair);
13233 i40e_exit_busy_conf(vsi);
13235 return err;
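/* i40e_queue_pair_disable()/i40e_queue_pair_enable() are used by the
 * AF_XDP path (i40e_xsk.c), which quiesces and restarts a single
 * queue pair around xsk_pool attach/detach instead of resetting the
 * whole VSI.
 */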
13239 * i40e_xdp - implements ndo_bpf for i40e
13241 * @xdp: XDP command
13243 static int i40e_xdp(struct net_device *dev,
13244 struct netdev_bpf *xdp)
13246 struct i40e_netdev_priv *np = netdev_priv(dev);
13247 struct i40e_vsi *vsi = np->vsi;
13249 if (vsi->type != I40E_VSI_MAIN)
13250 return -EINVAL;
13252 switch (xdp->command) {
13253 case XDP_SETUP_PROG:
13254 return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
13255 case XDP_SETUP_XSK_POOL:
13256 return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
13257 xdp->xsk.queue_id);
13258 default:
13259 return -EINVAL;
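/* A minimal sketch of a BPF program that could be attached through
 * the XDP_SETUP_PROG path above (illustrative, not part of this
 * driver; built separately against libbpf-style headers):
 *
 *   SEC("xdp")
 *   int xdp_pass_all(struct xdp_md *ctx)
 *   {
 *           return XDP_PASS;    (let every frame continue up the stack)
 *   }
 *
 *   char _license[] SEC("license") = "GPL";
 */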
13263 static const struct net_device_ops i40e_netdev_ops = {
13264 .ndo_open = i40e_open,
13265 .ndo_stop = i40e_close,
13266 .ndo_start_xmit = i40e_lan_xmit_frame,
13267 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
13268 .ndo_set_rx_mode = i40e_set_rx_mode,
13269 .ndo_validate_addr = eth_validate_addr,
13270 .ndo_set_mac_address = i40e_set_mac,
13271 .ndo_change_mtu = i40e_change_mtu,
13272 .ndo_do_ioctl = i40e_ioctl,
13273 .ndo_tx_timeout = i40e_tx_timeout,
13274 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
13275 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
13276 #ifdef CONFIG_NET_POLL_CONTROLLER
13277 .ndo_poll_controller = i40e_netpoll,
13279 .ndo_setup_tc = __i40e_setup_tc,
13280 .ndo_set_features = i40e_set_features,
13281 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
13282 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
13283 .ndo_get_vf_stats = i40e_get_vf_stats,
13284 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
13285 .ndo_get_vf_config = i40e_ndo_get_vf_config,
13286 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
13287 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
13288 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
13289 .ndo_get_phys_port_id = i40e_get_phys_port_id,
13290 .ndo_fdb_add = i40e_ndo_fdb_add,
13291 .ndo_features_check = i40e_features_check,
13292 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
13293 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
13294 .ndo_bpf = i40e_xdp,
13295 .ndo_xdp_xmit = i40e_xdp_xmit,
13296 .ndo_xsk_wakeup = i40e_xsk_wakeup,
13297 .ndo_dfwd_add_station = i40e_fwd_add,
13298 .ndo_dfwd_del_station = i40e_fwd_del,
13302 * i40e_config_netdev - Setup the netdev flags
13303 * @vsi: the VSI being configured
13305 * Returns 0 on success, negative value on failure
13307 static int i40e_config_netdev(struct i40e_vsi *vsi)
13309 struct i40e_pf *pf = vsi->back;
13310 struct i40e_hw *hw = &pf->hw;
13311 struct i40e_netdev_priv *np;
13312 struct net_device *netdev;
13313 u8 broadcast[ETH_ALEN];
13314 u8 mac_addr[ETH_ALEN];
13315 int etherdev_size;
13316 netdev_features_t hw_enc_features;
13317 netdev_features_t hw_features;
13319 etherdev_size = sizeof(struct i40e_netdev_priv);
13320 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
13321 if (!netdev)
13322 return -ENOMEM;
13324 vsi->netdev = netdev;
13325 np = netdev_priv(netdev);
13326 np->vsi = vsi;
13328 hw_enc_features = NETIF_F_SG |
13329 NETIF_F_IP_CSUM |
13330 NETIF_F_IPV6_CSUM |
13331 NETIF_F_HIGHDMA |
13332 NETIF_F_SOFT_FEATURES |
13333 NETIF_F_TSO |
13334 NETIF_F_TSO_ECN |
13335 NETIF_F_TSO6 |
13336 NETIF_F_GSO_GRE |
13337 NETIF_F_GSO_GRE_CSUM |
13338 NETIF_F_GSO_PARTIAL |
13339 NETIF_F_GSO_IPXIP4 |
13340 NETIF_F_GSO_IPXIP6 |
13341 NETIF_F_GSO_UDP_TUNNEL |
13342 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13343 NETIF_F_GSO_UDP_L4 |
13344 NETIF_F_SCTP_CRC |
13345 NETIF_F_RXHASH |
13346 NETIF_F_RXCSUM |
13347 0;
13349 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
13350 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
13352 netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
13354 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
13356 netdev->hw_enc_features |= hw_enc_features;
13358 /* record features VLANs can make use of */
13359 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
13361 /* enable macvlan offloads */
13362 netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
13364 hw_features = hw_enc_features |
13365 NETIF_F_HW_VLAN_CTAG_TX |
13366 NETIF_F_HW_VLAN_CTAG_RX;
13368 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
13369 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
13371 netdev->hw_features |= hw_features;
13373 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
13374 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
13376 if (vsi->type == I40E_VSI_MAIN) {
13377 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
13378 ether_addr_copy(mac_addr, hw->mac.perm_addr);
13379 /* The following steps are necessary for two reasons. First,
13380 * some older NVM configurations load a default MAC-VLAN
13381 * filter that will accept any tagged packet, and we want to
13382 * replace this with a normal filter. Additionally, it is
13383 * possible our MAC address was provided by the platform using
13384 * Open Firmware or similar.
13386 * Thus, we need to remove the default filter and install one
13387 * specific to the MAC address.
13389 i40e_rm_default_mac_filter(vsi, mac_addr);
13390 spin_lock_bh(&vsi->mac_filter_hash_lock);
13391 i40e_add_mac_filter(vsi, mac_addr);
13392 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13393 } else {
13394 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
13395 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
13396 * the end, which is 4 bytes long, so force truncation of the
13397 * original name by IFNAMSIZ - 4
13399 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
13400 IFNAMSIZ - 4,
13401 pf->vsi[pf->lan_vsi]->netdev->name);
13402 eth_random_addr(mac_addr);
13404 spin_lock_bh(&vsi->mac_filter_hash_lock);
13405 i40e_add_mac_filter(vsi, mac_addr);
13406 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13409 /* Add the broadcast filter so that we initially will receive
13410 * broadcast packets. Note that when a new VLAN is first added the
13411 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
13412 * specific filters as part of transitioning into "vlan" operation.
13413 * When more VLANs are added, the driver will copy each existing MAC
13414 * filter and add it for the new VLAN.
13416 * Broadcast filters are handled specially by
13417 * i40e_sync_filters_subtask, as the driver must set the broadcast
13418 * promiscuous bit instead of adding this directly as a MAC/VLAN
13419 * filter. The subtask will update the correct broadcast promiscuous
13420 * bits as VLANs become active or inactive.
13422 eth_broadcast_addr(broadcast);
13423 spin_lock_bh(&vsi->mac_filter_hash_lock);
13424 i40e_add_mac_filter(vsi, broadcast);
13425 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13427 ether_addr_copy(netdev->dev_addr, mac_addr);
13428 ether_addr_copy(netdev->perm_addr, mac_addr);
13430 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
13431 netdev->neigh_priv_len = sizeof(u32) * 4;
13433 netdev->priv_flags |= IFF_UNICAST_FLT;
13434 netdev->priv_flags |= IFF_SUPP_NOFCS;
13435 /* Setup netdev TC information */
13436 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
13438 netdev->netdev_ops = &i40e_netdev_ops;
13439 netdev->watchdog_timeo = 5 * HZ;
13440 i40e_set_ethtool_ops(netdev);
13442 /* MTU range: 68 - 9706 */
13443 netdev->min_mtu = ETH_MIN_MTU;
13444 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
13450 * i40e_vsi_delete - Delete a VSI from the switch
13451 * @vsi: the VSI being removed
13455 static void i40e_vsi_delete(struct i40e_vsi *vsi)
13457 /* remove default VSI is not allowed */
13458 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
13461 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
13465 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
13466 * @vsi: the VSI being queried
13468 * Returns 1 if HW bridge mode is VEB and returns 0 in case of VEPA mode
13470 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
13472 struct i40e_veb *veb;
13473 struct i40e_pf *pf = vsi->back;
13475 /* Uplink is not a bridge so default to VEB */
13476 if (vsi->veb_idx >= I40E_MAX_VEB)
13477 return 1;
13479 veb = pf->veb[vsi->veb_idx];
13480 if (!veb) {
13481 dev_info(&pf->pdev->dev,
13482 "There is no veb associated with the bridge\n");
13483 return -ENOENT;
13484 }
13486 /* Uplink is a bridge in VEPA mode */
13487 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
13488 return 0;
13489 } else {
13490 /* Uplink is a bridge in VEB mode */
13491 return 1;
13492 }
13494 /* VEPA is now default bridge, so return 0 */
13495 return 0;
13499 * i40e_add_vsi - Add a VSI to the switch
13500 * @vsi: the VSI being configured
13502 * This initializes a VSI context depending on the VSI type to be added and
13503 * passes it down to the add_vsi aq command.
13505 static int i40e_add_vsi(struct i40e_vsi *vsi)
13508 struct i40e_pf *pf = vsi->back;
13509 struct i40e_hw *hw = &pf->hw;
13510 struct i40e_vsi_context ctxt;
13511 struct i40e_mac_filter *f;
13512 struct hlist_node *h;
13513 int ret = -ENOENT;
13514 int bkt;
13515 u8 enabled_tc = 0x1; /* TC0 enabled */
13518 memset(&ctxt, 0, sizeof(ctxt));
13519 switch (vsi->type) {
13520 case I40E_VSI_MAIN:
13521 /* The PF's main VSI is already setup as part of the
13522 * device initialization, so we'll not bother with
13523 * the add_vsi call, but we will retrieve the current
13526 ctxt.seid = pf->main_vsi_seid;
13527 ctxt.pf_num = pf->hw.pf_id;
13529 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
13530 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13531 if (ret) {
13532 dev_info(&pf->pdev->dev,
13533 "couldn't get PF vsi config, err %s aq_err %s\n",
13534 i40e_stat_str(&pf->hw, ret),
13535 i40e_aq_str(&pf->hw,
13536 pf->hw.aq.asq_last_status));
13537 return -ENOENT;
13538 }
13539 vsi->info = ctxt.info;
13540 vsi->info.valid_sections = 0;
13542 vsi->seid = ctxt.seid;
13543 vsi->id = ctxt.vsi_number;
13545 enabled_tc = i40e_pf_get_tc_map(pf);
13547 /* Source pruning is enabled by default, so the flag is
13548 * negative logic - if it's set, we need to fiddle with
13549 * the VSI to disable source pruning.
13551 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
13552 memset(&ctxt, 0, sizeof(ctxt));
13553 ctxt.seid = pf->main_vsi_seid;
13554 ctxt.pf_num = pf->hw.pf_id;
13556 ctxt.info.valid_sections |=
13557 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13558 ctxt.info.switch_id =
13559 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
13560 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13561 if (ret) {
13562 dev_info(&pf->pdev->dev,
13563 "update vsi failed, err %s aq_err %s\n",
13564 i40e_stat_str(&pf->hw, ret),
13565 i40e_aq_str(&pf->hw,
13566 pf->hw.aq.asq_last_status));
13572 /* MFP mode setup queue map and update VSI */
13573 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
13574 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
13575 memset(&ctxt, 0, sizeof(ctxt));
13576 ctxt.seid = pf->main_vsi_seid;
13577 ctxt.pf_num = pf->hw.pf_id;
13579 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
13580 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13581 if (ret) {
13582 dev_info(&pf->pdev->dev,
13583 "update vsi failed, err %s aq_err %s\n",
13584 i40e_stat_str(&pf->hw, ret),
13585 i40e_aq_str(&pf->hw,
13586 pf->hw.aq.asq_last_status));
13590 /* update the local VSI info queue map */
13591 i40e_vsi_update_queue_map(vsi, &ctxt);
13592 vsi->info.valid_sections = 0;
13594 /* Default/Main VSI is only enabled for TC0
13595 * reconfigure it to enable all TCs that are
13596 * available on the port in SFP mode.
13597 * For MFP case the iSCSI PF would use this
13598 * flow to enable LAN+iSCSI TC.
13600 ret = i40e_vsi_config_tc(vsi, enabled_tc);
13601 if (ret) {
13602 /* Single TC condition is not fatal,
13603 * message and continue
13605 dev_info(&pf->pdev->dev,
13606 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
13608 i40e_stat_str(&pf->hw, ret),
13609 i40e_aq_str(&pf->hw,
13610 pf->hw.aq.asq_last_status));
13615 case I40E_VSI_FDIR:
13616 ctxt.pf_num = hw->pf_id;
13618 ctxt.uplink_seid = vsi->uplink_seid;
13619 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13620 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13621 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
13622 (i40e_is_vsi_uplink_mode_veb(vsi))) {
13623 ctxt.info.valid_sections |=
13624 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13625 ctxt.info.switch_id =
13626 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13628 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13631 case I40E_VSI_VMDQ2:
13632 ctxt.pf_num = hw->pf_id;
13634 ctxt.uplink_seid = vsi->uplink_seid;
13635 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13636 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
13638 /* This VSI is connected to VEB so the switch_id
13639 * should be set to zero by default.
13641 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13642 ctxt.info.valid_sections |=
13643 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13644 ctxt.info.switch_id =
13645 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13648 /* Setup the VSI tx/rx queue map for TC0 only for now */
13649 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13652 case I40E_VSI_SRIOV:
13653 ctxt.pf_num = hw->pf_id;
13654 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
13655 ctxt.uplink_seid = vsi->uplink_seid;
13656 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13657 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
13659 /* This VSI is connected to VEB so the switch_id
13660 * should be set to zero by default.
13662 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13663 ctxt.info.valid_sections |=
13664 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13665 ctxt.info.switch_id =
13666 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13669 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
13670 ctxt.info.valid_sections |=
13671 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
13672 ctxt.info.queueing_opt_flags |=
13673 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
13674 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
13677 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
13678 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
13679 if (pf->vf[vsi->vf_id].spoofchk) {
13680 ctxt.info.valid_sections |=
13681 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
13682 ctxt.info.sec_flags |=
13683 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
13684 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
13686 /* Setup the VSI tx/rx queue map for TC0 only for now */
13687 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13690 case I40E_VSI_IWARP:
13691 /* send down message to iWARP */
13698 if (vsi->type != I40E_VSI_MAIN) {
13699 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
13700 if (ret) {
13701 dev_info(&vsi->back->pdev->dev,
13702 "add vsi failed, err %s aq_err %s\n",
13703 i40e_stat_str(&pf->hw, ret),
13704 i40e_aq_str(&pf->hw,
13705 pf->hw.aq.asq_last_status));
13706 ret = -ENOENT;
13707 goto err;
13708 }
13709 vsi->info = ctxt.info;
13710 vsi->info.valid_sections = 0;
13711 vsi->seid = ctxt.seid;
13712 vsi->id = ctxt.vsi_number;
13715 vsi->active_filters = 0;
13716 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
13717 spin_lock_bh(&vsi->mac_filter_hash_lock);
13718 /* If macvlan filters already exist, force them to get loaded */
13719 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
13720 f->state = I40E_FILTER_NEW;
13723 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13726 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
13727 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
13730 /* Update VSI BW information */
13731 ret = i40e_vsi_get_bw_info(vsi);
13732 if (ret) {
13733 dev_info(&pf->pdev->dev,
13734 "couldn't get vsi bw info, err %s aq_err %s\n",
13735 i40e_stat_str(&pf->hw, ret),
13736 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13737 /* VSI is already added so not tearing that up */
13738 goto err;
13739 }
13741 err:
13742 return ret;
13746 * i40e_vsi_release - Delete a VSI and free its resources
13747 * @vsi: the VSI being removed
13749 * Returns 0 on success or < 0 on error
13751 int i40e_vsi_release(struct i40e_vsi *vsi)
13753 struct i40e_mac_filter *f;
13754 struct hlist_node *h;
13755 struct i40e_veb *veb = NULL;
13756 struct i40e_pf *pf;
13757 u16 uplink_seid;
13758 int i, n, bkt;
13760 pf = vsi->back;
13762 /* release of a VEB-owner or last VSI is not allowed */
13763 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
13764 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
13765 vsi->seid, vsi->uplink_seid);
13766 return -ENODEV;
13767 }
13768 if (vsi == pf->vsi[pf->lan_vsi] &&
13769 !test_bit(__I40E_DOWN, pf->state)) {
13770 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
13771 return -ENODEV;
13772 }
13774 uplink_seid = vsi->uplink_seid;
13775 if (vsi->type != I40E_VSI_SRIOV) {
13776 if (vsi->netdev_registered) {
13777 vsi->netdev_registered = false;
13779 /* results in a call to i40e_close() */
13780 unregister_netdev(vsi->netdev);
13783 i40e_vsi_close(vsi);
13785 i40e_vsi_disable_irq(vsi);
13788 spin_lock_bh(&vsi->mac_filter_hash_lock);
13790 /* clear the sync flag on all filters */
13791 if (vsi->netdev) {
13792 __dev_uc_unsync(vsi->netdev, NULL);
13793 __dev_mc_unsync(vsi->netdev, NULL);
13794 }
13796 /* make sure any remaining filters are marked for deletion */
13797 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
13798 __i40e_del_filter(vsi, f);
13800 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13802 i40e_sync_vsi_filters(vsi);
13804 i40e_vsi_delete(vsi);
13805 i40e_vsi_free_q_vectors(vsi);
13806 if (vsi->netdev) {
13807 free_netdev(vsi->netdev);
13808 vsi->netdev = NULL;
13809 }
13810 i40e_vsi_clear_rings(vsi);
13811 i40e_vsi_clear(vsi);
13813 /* If this was the last thing on the VEB, except for the
13814 * controlling VSI, remove the VEB, which puts the controlling
13815 * VSI onto the next level down in the switch.
13817 * Well, okay, there's one more exception here: don't remove
13818 * the orphan VEBs yet. We'll wait for an explicit remove request
13819 * from up the network stack.
13821 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
13822 if (pf->vsi[i] &&
13823 pf->vsi[i]->uplink_seid == uplink_seid &&
13824 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13825 n++; /* count the VSIs */
13828 for (i = 0; i < I40E_MAX_VEB; i++) {
13829 if (!pf->veb[i])
13830 continue;
13831 if (pf->veb[i]->uplink_seid == uplink_seid)
13832 n++; /* count the VEBs */
13833 if (pf->veb[i]->seid == uplink_seid)
13834 veb = pf->veb[i];
13836 if (n == 0 && veb && veb->uplink_seid != 0)
13837 i40e_veb_release(veb);
13843 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
13844 * @vsi: ptr to the VSI
13846 * This should only be called after i40e_vsi_mem_alloc() which allocates the
13847 * corresponding SW VSI structure and initializes num_queue_pairs for the
13848 * newly allocated VSI.
13850 * Returns 0 on success or negative on failure
13852 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
13855 struct i40e_pf *pf = vsi->back;
13857 if (vsi->q_vectors[0]) {
13858 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
13859 vsi->seid);
13860 return -EEXIST;
13861 }
13863 if (vsi->base_vector) {
13864 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
13865 vsi->seid, vsi->base_vector);
13866 return -EEXIST;
13867 }
13869 ret = i40e_vsi_alloc_q_vectors(vsi);
13870 if (ret) {
13871 dev_info(&pf->pdev->dev,
13872 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
13873 vsi->num_q_vectors, vsi->seid, ret);
13874 vsi->num_q_vectors = 0;
13875 goto vector_setup_out;
13878 /* In Legacy mode, we do not have to get any other vector since we
13879 * piggyback on the misc/ICR0 for queue interrupts.
13880 */
13881 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
13882 return ret;
13883 if (vsi->num_q_vectors)
13884 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
13885 vsi->num_q_vectors, vsi->idx);
13886 if (vsi->base_vector < 0) {
13887 dev_info(&pf->pdev->dev,
13888 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
13889 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
13890 i40e_vsi_free_q_vectors(vsi);
13891 ret = -ENOENT;
13892 goto vector_setup_out;
13900 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
13901 * @vsi: pointer to the vsi.
13903 * This re-allocates a vsi's queue resources.
13905 * Returns pointer to the successfully allocated and configured VSI sw struct
13906 * on success, otherwise returns NULL on failure.
13908 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
13910 u16 alloc_queue_pairs;
13911 struct i40e_pf *pf;
13920 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
13921 i40e_vsi_clear_rings(vsi);
13923 i40e_vsi_free_arrays(vsi, false);
13924 i40e_set_num_rings_in_vsi(vsi);
13925 ret = i40e_vsi_alloc_arrays(vsi, false);
13929 alloc_queue_pairs = vsi->alloc_queue_pairs *
13930 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
13932 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
13933 if (ret < 0) {
13934 dev_info(&pf->pdev->dev,
13935 "failed to get tracking for %d queues for VSI %d err %d\n",
13936 alloc_queue_pairs, vsi->seid, ret);
13937 goto err_vsi;
13938 }
13939 vsi->base_queue = ret;
13941 /* Update the FW view of the VSI. Force a reset of TC and queue
13942 * layout configurations.
13944 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
13945 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
13946 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
13947 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
13948 if (vsi->type == I40E_VSI_MAIN)
13949 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
13951 /* assign it some queues */
13952 ret = i40e_alloc_rings(vsi);
13953 if (ret)
13954 goto err_rings;
13956 /* map all of the rings to the q_vectors */
13957 i40e_vsi_map_rings_to_vectors(vsi);
13958 return vsi;
13960 err_rings:
13961 i40e_vsi_free_q_vectors(vsi);
13962 if (vsi->netdev_registered) {
13963 vsi->netdev_registered = false;
13964 unregister_netdev(vsi->netdev);
13965 free_netdev(vsi->netdev);
13966 vsi->netdev = NULL;
13968 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
13969 err_vsi:
13970 i40e_vsi_clear(vsi);
13971 return NULL;
13975 * i40e_vsi_setup - Set up a VSI by a given type
13976 * @pf: board private structure
13978 * @uplink_seid: the switch element to link to
13979 * @param1: usage depends upon VSI type. For VF types, indicates VF id
13981 * This allocates the sw VSI structure and its queue resources, then adds a VSI
13982 * to the identified VEB.
13984 * Returns pointer to the successfully allocated and configured VSI sw struct on
13985 * success, otherwise returns NULL on failure.
13987 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
13988 u16 uplink_seid, u32 param1)
13990 struct i40e_vsi *vsi = NULL;
13991 struct i40e_veb *veb = NULL;
13992 u16 alloc_queue_pairs;
13996 /* The requested uplink_seid must be either
13997 * - the PF's port seid
13998 * no VEB is needed because this is the PF
13999 * or this is a Flow Director special case VSI
14000 * - seid of an existing VEB
14001 * - seid of a VSI that owns an existing VEB
14002 * - seid of a VSI that doesn't own a VEB
14003 * a new VEB is created and the VSI becomes the owner
14004 * - seid of the PF VSI, which is what creates the first VEB
14005 * this is a special case of the previous
14007 * Find which uplink_seid we were given and create a new VEB if needed
14009 for (i = 0; i < I40E_MAX_VEB; i++) {
14010 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
14016 if (!veb && uplink_seid != pf->mac_seid) {
14018 for (i = 0; i < pf->num_alloc_vsi; i++) {
14019 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
14020 vsi = pf->vsi[i];
14021 break;
14022 }
14023 }
14024 if (!vsi) {
14025 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
14026 uplink_seid);
14027 return NULL;
14028 }
14030 if (vsi->uplink_seid == pf->mac_seid)
14031 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
14032 vsi->tc_config.enabled_tc);
14033 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
14034 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
14035 vsi->tc_config.enabled_tc);
14037 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
14038 dev_info(&vsi->back->pdev->dev,
14039 "New VSI creation error, uplink seid of LAN VSI expected.\n");
14042 /* We come up by default in VEPA mode if SRIOV is not
14043 * already enabled, in which case we can't force VEPA
14046 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
14047 veb->bridge_mode = BRIDGE_MODE_VEPA;
14048 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
14050 i40e_config_bridge_mode(veb);
14052 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
14053 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
14054 veb = pf->veb[i];
14055 }
14056 if (!veb) {
14057 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
14058 return NULL;
14059 }
14061 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14062 uplink_seid = veb->seid;
14065 /* get vsi sw struct */
14066 v_idx = i40e_vsi_mem_alloc(pf, type);
14067 if (v_idx < 0)
14068 goto err_alloc;
14069 vsi = pf->vsi[v_idx];
14070 if (!vsi)
14071 goto err_alloc;
14072 vsi->type = type;
14073 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
14075 if (type == I40E_VSI_MAIN)
14076 pf->lan_vsi = v_idx;
14077 else if (type == I40E_VSI_SRIOV)
14078 vsi->vf_id = param1;
14079 /* assign it some queues */
14080 alloc_queue_pairs = vsi->alloc_queue_pairs *
14081 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14083 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14084 if (ret < 0) {
14085 dev_info(&pf->pdev->dev,
14086 "failed to get tracking for %d queues for VSI %d err=%d\n",
14087 alloc_queue_pairs, vsi->seid, ret);
14088 goto err_vsi;
14089 }
14090 vsi->base_queue = ret;
14092 /* get a VSI from the hardware */
14093 vsi->uplink_seid = uplink_seid;
14094 ret = i40e_add_vsi(vsi);
14095 if (ret)
14096 goto err_vsi;
14098 switch (vsi->type) {
14099 /* setup the netdev if needed */
14100 case I40E_VSI_MAIN:
14101 case I40E_VSI_VMDQ2:
14102 ret = i40e_config_netdev(vsi);
14103 if (ret)
14104 goto err_netdev;
14105 ret = register_netdev(vsi->netdev);
14106 if (ret)
14107 goto err_netdev;
14108 vsi->netdev_registered = true;
14109 netif_carrier_off(vsi->netdev);
14110 #ifdef CONFIG_I40E_DCB
14111 /* Setup DCB netlink interface */
14112 i40e_dcbnl_setup(vsi);
14113 #endif /* CONFIG_I40E_DCB */
14114 fallthrough;
14115 case I40E_VSI_FDIR:
14116 /* set up vectors and rings if needed */
14117 ret = i40e_vsi_setup_vectors(vsi);
14118 if (ret)
14119 goto err_msix;
14121 ret = i40e_alloc_rings(vsi);
14122 if (ret)
14123 goto err_rings;
14125 /* map all of the rings to the q_vectors */
14126 i40e_vsi_map_rings_to_vectors(vsi);
14128 i40e_vsi_reset_stats(vsi);
14131 /* no netdev or rings for the other VSI types */
14135 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
14136 (vsi->type == I40E_VSI_VMDQ2)) {
14137 ret = i40e_vsi_config_rss(vsi);
14138 }
14139 return vsi;
14141 err_rings:
14142 i40e_vsi_free_q_vectors(vsi);
14143 err_msix:
14144 if (vsi->netdev_registered) {
14145 vsi->netdev_registered = false;
14146 unregister_netdev(vsi->netdev);
14147 free_netdev(vsi->netdev);
14148 vsi->netdev = NULL;
14150 err_netdev:
14151 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14152 err_vsi:
14153 i40e_vsi_clear(vsi);
14154 err_alloc:
14155 return NULL;
14159 * i40e_veb_get_bw_info - Query VEB BW information
14160 * @veb: the veb to query
14162 * Query the Tx scheduler BW configuration data for given VEB
14164 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
14166 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
14167 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
14168 struct i40e_pf *pf = veb->pf;
14169 struct i40e_hw *hw = &pf->hw;
14170 u32 tc_bw_max;
14171 int ret;
14172 int i;
14174 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
14175 &bw_data, NULL);
14176 if (ret) {
14177 dev_info(&pf->pdev->dev,
14178 "query veb bw config failed, err %s aq_err %s\n",
14179 i40e_stat_str(&pf->hw, ret),
14180 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14181 goto out;
14182 }
14184 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
14185 &ets_data, NULL);
14186 if (ret) {
14187 dev_info(&pf->pdev->dev,
14188 "query veb bw ets config failed, err %s aq_err %s\n",
14189 i40e_stat_str(&pf->hw, ret),
14190 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14191 goto out;
14192 }
14194 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
14195 veb->bw_max_quanta = ets_data.tc_bw_max;
14196 veb->is_abs_credits = bw_data.absolute_credits_enable;
14197 veb->enabled_tc = ets_data.tc_valid_bits;
14198 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
14199 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
14200 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
14201 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
14202 veb->bw_tc_limit_credits[i] =
14203 le16_to_cpu(bw_data.tc_bw_limits[i]);
14204 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
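/* tc_bw_max packs two little-endian 16-bit words into one 32-bit
 * value; each TC then owns a 3-bit max-quanta field at a 4-bit
 * stride, which is what "(tc_bw_max >> (i*4)) & 0x7" above extracts.
 */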
14212 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
14213 * @pf: board private structure
14215 * On error: returns error code (negative)
14216 * On success: returns vsi index in PF (positive)
14218 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
14221 struct i40e_veb *veb;
14222 int ret = -ENOENT;
14223 int i;
14224 /* Need to protect the allocation of switch elements at the PF level */
14225 mutex_lock(&pf->switch_mutex);
14227 /* VEB list may be fragmented if VEB creation/destruction has
14228 * been happening. We can afford to do a quick scan to look
14229 * for any free slots in the list.
14231 * find next empty veb slot, looping back around if necessary
14232 */
14233 i = 0;
14234 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
14235 i++;
14236 if (i >= I40E_MAX_VEB) {
14237 ret = -ENOMEM;
14238 goto err_alloc_veb; /* out of VEB slots! */
14241 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
14242 if (!veb) {
14243 ret = -ENOMEM;
14244 goto err_alloc_veb;
14245 }
14246 veb->pf = pf;
14247 veb->idx = i;
14248 veb->enabled_tc = 1;
14249 pf->veb[i] = veb;
14250 ret = i;
14252 err_alloc_veb:
14253 mutex_unlock(&pf->switch_mutex);
14254 return ret;
14258 * i40e_switch_branch_release - Delete a branch of the switch tree
14259 * @branch: where to start deleting
14261 * This uses recursion to find the tips of the branch to be
14262 * removed, deleting until we get back to and can delete this VEB.
14264 static void i40e_switch_branch_release(struct i40e_veb *branch)
14266 struct i40e_pf *pf = branch->pf;
14267 u16 branch_seid = branch->seid;
14268 u16 veb_idx = branch->idx;
14271 /* release any VEBs on this VEB - RECURSION */
14272 for (i = 0; i < I40E_MAX_VEB; i++) {
14273 if (!pf->veb[i])
14274 continue;
14275 if (pf->veb[i]->uplink_seid == branch->seid)
14276 i40e_switch_branch_release(pf->veb[i]);
14279 /* Release the VSIs on this VEB, but not the owner VSI.
14281 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
14282 * the VEB itself, so don't use (*branch) after this loop.
14284 for (i = 0; i < pf->num_alloc_vsi; i++) {
14285 if (!pf->vsi[i])
14286 continue;
14287 if (pf->vsi[i]->uplink_seid == branch_seid &&
14288 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
14289 i40e_vsi_release(pf->vsi[i]);
14293 /* There's one corner case where the VEB might not have been
14294 * removed, so double check it here and remove it if needed.
14295 * This case happens if the veb was created from the debugfs
14296 * commands and no VSIs were added to it.
14298 if (pf->veb[veb_idx])
14299 i40e_veb_release(pf->veb[veb_idx]);
14303 * i40e_veb_clear - remove veb struct
14304 * @veb: the veb to remove
14306 static void i40e_veb_clear(struct i40e_veb *veb)
14308 if (!veb)
14309 return;
14311 if (veb->pf) {
14312 struct i40e_pf *pf = veb->pf;
14314 mutex_lock(&pf->switch_mutex);
14315 if (pf->veb[veb->idx] == veb)
14316 pf->veb[veb->idx] = NULL;
14317 mutex_unlock(&pf->switch_mutex);
14318 }
14320 kfree(veb);
14324 * i40e_veb_release - Delete a VEB and free its resources
14325 * @veb: the VEB being removed
14327 void i40e_veb_release(struct i40e_veb *veb)
14329 struct i40e_vsi *vsi = NULL;
14330 struct i40e_pf *pf;
14331 int i, n = 0;
14333 pf = veb->pf;
14335 /* find the remaining VSI and check for extras */
14336 for (i = 0; i < pf->num_alloc_vsi; i++) {
14337 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
14338 n++;
14339 vsi = pf->vsi[i];
14340 }
14341 }
14342 if (n != 1) {
14343 dev_info(&pf->pdev->dev,
14344 "can't remove VEB %d with %d VSIs left\n",
14345 veb->seid, n);
14346 return;
14347 }
14349 /* move the remaining VSI to uplink veb */
14350 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
14351 if (veb->uplink_seid) {
14352 vsi->uplink_seid = veb->uplink_seid;
14353 if (veb->uplink_seid == pf->mac_seid)
14354 vsi->veb_idx = I40E_NO_VEB;
14355 else
14356 vsi->veb_idx = veb->veb_idx;
14357 } else {
14359 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
14360 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
14363 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14364 i40e_veb_clear(veb);
14368 * i40e_add_veb - create the VEB in the switch
14369 * @veb: the VEB to be instantiated
14370 * @vsi: the controlling VSI
14372 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
14374 struct i40e_pf *pf = veb->pf;
14375 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
14378 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
14379 veb->enabled_tc, false,
14380 &veb->seid, enable_stats, NULL);
14382 /* get a VEB from the hardware */
14383 if (ret) {
14384 dev_info(&pf->pdev->dev,
14385 "couldn't add VEB, err %s aq_err %s\n",
14386 i40e_stat_str(&pf->hw, ret),
14387 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14388 return -EPERM;
14389 }
14391 /* get statistics counter */
14392 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
14393 &veb->stats_idx, NULL, NULL, NULL);
14394 if (ret) {
14395 dev_info(&pf->pdev->dev,
14396 "couldn't get VEB statistics idx, err %s aq_err %s\n",
14397 i40e_stat_str(&pf->hw, ret),
14398 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14399 return -EPERM;
14400 }
14401 ret = i40e_veb_get_bw_info(veb);
14402 if (ret) {
14403 dev_info(&pf->pdev->dev,
14404 "couldn't get VEB bw info, err %s aq_err %s\n",
14405 i40e_stat_str(&pf->hw, ret),
14406 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14407 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14408 return -ENOENT;
14409 }
14411 vsi->uplink_seid = veb->seid;
14412 vsi->veb_idx = veb->idx;
14413 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14419 * i40e_veb_setup - Set up a VEB
14420 * @pf: board private structure
14421 * @flags: VEB setup flags
14422 * @uplink_seid: the switch element to link to
14423 * @vsi_seid: the initial VSI seid
14424 * @enabled_tc: Enabled TC bit-map
14426 * This allocates the sw VEB structure and links it into the switch
14427 * It is possible and legal for this to be a duplicate of an already
14428 * existing VEB. It is also possible for both uplink and vsi seids
14429 * to be zero, in order to create a floating VEB.
14431 * Returns pointer to the successfully allocated VEB sw struct on
14432 * success, otherwise returns NULL on failure.
14434 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
14435 u16 uplink_seid, u16 vsi_seid,
14438 struct i40e_veb *veb, *uplink_veb = NULL;
14439 int vsi_idx, veb_idx;
14442 /* if one seid is 0, the other must be 0 to create a floating relay */
14443 if ((uplink_seid == 0 || vsi_seid == 0) &&
14444 (uplink_seid + vsi_seid != 0)) {
14445 dev_info(&pf->pdev->dev,
14446 "one, not both seid's are 0: uplink=%d vsi=%d\n",
14447 uplink_seid, vsi_seid);
14448 return NULL;
14449 }
14451 /* make sure there is such a vsi and uplink */
14452 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
14453 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
14455 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
14456 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
14457 vsi_seid);
14458 return NULL;
14459 }
14461 if (uplink_seid && uplink_seid != pf->mac_seid) {
14462 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
14463 if (pf->veb[veb_idx] &&
14464 pf->veb[veb_idx]->seid == uplink_seid) {
14465 uplink_veb = pf->veb[veb_idx];
14466 break;
14467 }
14468 }
14469 if (!uplink_veb) {
14470 dev_info(&pf->pdev->dev,
14471 "uplink seid %d not found\n", uplink_seid);
14472 return NULL;
14473 }
14474 }
14476 /* get veb sw struct */
14477 veb_idx = i40e_veb_mem_alloc(pf);
14478 if (veb_idx < 0)
14479 goto err_alloc;
14480 veb = pf->veb[veb_idx];
14481 veb->flags = flags;
14482 veb->uplink_seid = uplink_seid;
14483 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
14484 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
14486 /* create the VEB in the switch */
14487 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
14488 if (ret)
14489 goto err_veb;
14490 if (vsi_idx == pf->lan_vsi)
14491 pf->lan_veb = veb->idx;
14493 return veb;
14495 err_veb:
14496 i40e_veb_clear(veb);
14497 err_alloc:
14498 return NULL;
14502 * i40e_setup_pf_switch_element - set PF vars based on switch type
14503 * @pf: board private structure
14504 * @ele: element we are building info from
14505 * @num_reported: total number of elements
14506 * @printconfig: should we print the contents
14508 * helper function to assist in extracting a few useful SEID values.
14510 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
14511 struct i40e_aqc_switch_config_element_resp *ele,
14512 u16 num_reported, bool printconfig)
14514 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
14515 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
14516 u8 element_type = ele->element_type;
14517 u16 seid = le16_to_cpu(ele->seid);
14519 if (printconfig)
14520 dev_info(&pf->pdev->dev,
14521 "type=%d seid=%d uplink=%d downlink=%d\n",
14522 element_type, seid, uplink_seid, downlink_seid);
14524 switch (element_type) {
14525 case I40E_SWITCH_ELEMENT_TYPE_MAC:
14526 pf->mac_seid = seid;
14527 break;
14528 case I40E_SWITCH_ELEMENT_TYPE_VEB:
14530 if (uplink_seid != pf->mac_seid)
14531 break;
14532 if (pf->lan_veb >= I40E_MAX_VEB) {
14535 /* find existing or else empty VEB */
14536 for (v = 0; v < I40E_MAX_VEB; v++) {
14537 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
14538 pf->lan_veb = v;
14539 break;
14540 }
14541 }
14542 if (pf->lan_veb >= I40E_MAX_VEB) {
14543 v = i40e_veb_mem_alloc(pf);
14544 if (v < 0)
14545 break;
14546 pf->lan_veb = v;
14547 }
14549 if (pf->lan_veb >= I40E_MAX_VEB)
14550 break;
14552 pf->veb[pf->lan_veb]->seid = seid;
14553 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
14554 pf->veb[pf->lan_veb]->pf = pf;
14555 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
14556 break;
14557 case I40E_SWITCH_ELEMENT_TYPE_VSI:
14558 if (num_reported != 1)
14559 break;
14560 /* This is immediately after a reset so we can assume this is
14561 * the PF's VSI
14562 */
14563 pf->mac_seid = uplink_seid;
14564 pf->pf_seid = downlink_seid;
14565 pf->main_vsi_seid = seid;
14566 if (printconfig)
14567 dev_info(&pf->pdev->dev,
14568 "pf_seid=%d main_vsi_seid=%d\n",
14569 pf->pf_seid, pf->main_vsi_seid);
14570 break;
14571 case I40E_SWITCH_ELEMENT_TYPE_PF:
14572 case I40E_SWITCH_ELEMENT_TYPE_VF:
14573 case I40E_SWITCH_ELEMENT_TYPE_EMP:
14574 case I40E_SWITCH_ELEMENT_TYPE_BMC:
14575 case I40E_SWITCH_ELEMENT_TYPE_PE:
14576 case I40E_SWITCH_ELEMENT_TYPE_PA:
14577 /* ignore these for now */
14578 break;
14579 default:
14580 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
14581 element_type, seid);
14587 * i40e_fetch_switch_configuration - Get switch config from firmware
14588 * @pf: board private structure
14589 * @printconfig: should we print the contents
14591 * Get the current switch configuration from the device and
14592 * extract a few useful SEID values.
14594 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
14596 struct i40e_aqc_get_switch_config_resp *sw_config;
14597 u16 next_seid = 0;
14598 int ret = 0;
14599 u8 *aq_buf;
14600 int i;
14602 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
14603 if (!aq_buf)
14604 return -ENOMEM;
14606 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
14607 do {
14608 u16 num_reported, num_total;
14610 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
14611 I40E_AQ_LARGE_BUF,
14612 &next_seid, NULL);
14613 if (ret) {
14614 dev_info(&pf->pdev->dev,
14615 "get switch config failed err %s aq_err %s\n",
14616 i40e_stat_str(&pf->hw, ret),
14617 i40e_aq_str(&pf->hw,
14618 pf->hw.aq.asq_last_status));
14619 kfree(aq_buf);
14620 return -ENOENT;
14621 }
14623 num_reported = le16_to_cpu(sw_config->header.num_reported);
14624 num_total = le16_to_cpu(sw_config->header.num_total);
14626 if (printconfig)
14627 dev_info(&pf->pdev->dev,
14628 "header: %d reported %d total\n",
14629 num_reported, num_total);
14631 for (i = 0; i < num_reported; i++) {
14632 struct i40e_aqc_switch_config_element_resp *ele =
14633 &sw_config->element[i];
14635 i40e_setup_pf_switch_element(pf, ele, num_reported,
14636 printconfig);
14637 }
14638 } while (next_seid != 0);
14640 kfree(aq_buf);
14641 return ret;
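/* The admin queue returns the switch configuration in chunks that fit
 * in I40E_AQ_LARGE_BUF; next_seid carries the continuation cookie, so
 * the do/while above keeps fetching until the firmware reports no
 * further elements (next_seid == 0).
 */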
14645 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
14646 * @pf: board private structure
14647 * @reinit: if the Main VSI needs to be re-initialized.
14649 * Returns 0 on success, negative value on failure
14651 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
14656 /* find out what's out there already */
14657 ret = i40e_fetch_switch_configuration(pf, false);
14658 if (ret) {
14659 dev_info(&pf->pdev->dev,
14660 "couldn't fetch switch config, err %s aq_err %s\n",
14661 i40e_stat_str(&pf->hw, ret),
14662 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14663 return ret;
14664 }
14665 i40e_pf_reset_stats(pf);
14667 /* set the switch config bit for the whole device to
14668 * support limited promisc or true promisc
14669 * when user requests promisc. The default is limited
14670 * promisc.
14671 */
14673 if ((pf->hw.pf_id == 0) &&
14674 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
14675 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14676 pf->last_sw_conf_flags = flags;
14679 if (pf->hw.pf_id == 0) {
14680 u16 valid_flags;
14682 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14683 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
14684 NULL);
14685 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
14686 dev_info(&pf->pdev->dev,
14687 "couldn't set switch config bits, err %s aq_err %s\n",
14688 i40e_stat_str(&pf->hw, ret),
14689 i40e_aq_str(&pf->hw,
14690 pf->hw.aq.asq_last_status));
14691 /* not a fatal problem, just keep going */
14692 }
14693 pf->last_sw_conf_valid_flags = valid_flags;
14696 /* first time setup */
14697 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
14698 struct i40e_vsi *vsi = NULL;
14699 u16 uplink_seid;
14701 /* Set up the PF VSI associated with the PF's main VSI
14702 * that is already in the HW switch
14704 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
14705 uplink_seid = pf->veb[pf->lan_veb]->seid;
14707 uplink_seid = pf->mac_seid;
14708 if (pf->lan_vsi == I40E_NO_VSI)
14709 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
14711 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
14712 if (!vsi) {
14713 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
14714 i40e_cloud_filter_exit(pf);
14715 i40e_fdir_teardown(pf);
14716 return -EAGAIN;
14717 }
14718 } else {
14719 /* force a reset of TC and queue layout configurations */
14720 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14722 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14723 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14724 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14725 }
14726 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
14728 i40e_fdir_sb_setup(pf);
14730 /* Setup static PF queue filter control settings */
14731 ret = i40e_setup_pf_filter_control(pf);
14732 if (ret)
14733 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
14734 ret);
14735 /* Failure here should not stop continuing other steps */
14738 /* enable RSS in the HW, even for only one queue, as the stack can use
14739 * the hash
14740 */
14741 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
14742 i40e_pf_config_rss(pf);
14744 /* fill in link information and enable LSE reporting */
14745 i40e_link_event(pf);
14747 /* Initialize user-specific link properties */
14748 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
14749 I40E_AQ_AN_COMPLETED) ? true : false);
14753 /* repopulate tunnel port filters */
14754 udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
14760 * i40e_determine_queue_usage - Work out queue distribution
14761 * @pf: board private structure
14763 static void i40e_determine_queue_usage(struct i40e_pf *pf)
14765 int queues_left;
14766 int q_max;
14768 pf->num_lan_qps = 0;
14770 /* Find the max queues to be put into basic use. We'll always be
14771 * using TC0, whether or not DCB is running, and TC0 will get the
14774 queues_left = pf->hw.func_caps.num_tx_qp;
14776 if ((queues_left == 1) ||
14777 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
14778 /* one qp for PF, no queues for anything else */
14779 queues_left = 0;
14780 pf->alloc_rss_size = pf->num_lan_qps = 1;
14782 /* make sure all the fancies are disabled */
14783 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
14784 I40E_FLAG_IWARP_ENABLED |
14785 I40E_FLAG_FD_SB_ENABLED |
14786 I40E_FLAG_FD_ATR_ENABLED |
14787 I40E_FLAG_DCB_CAPABLE |
14788 I40E_FLAG_DCB_ENABLED |
14789 I40E_FLAG_SRIOV_ENABLED |
14790 I40E_FLAG_VMDQ_ENABLED);
14791 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14792 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
14793 I40E_FLAG_FD_SB_ENABLED |
14794 I40E_FLAG_FD_ATR_ENABLED |
14795 I40E_FLAG_DCB_CAPABLE))) {
14796 /* one qp for PF */
14797 pf->alloc_rss_size = pf->num_lan_qps = 1;
14798 queues_left -= pf->num_lan_qps;
14800 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
14801 I40E_FLAG_IWARP_ENABLED |
14802 I40E_FLAG_FD_SB_ENABLED |
14803 I40E_FLAG_FD_ATR_ENABLED |
14804 I40E_FLAG_DCB_ENABLED |
14805 I40E_FLAG_VMDQ_ENABLED);
14806 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14807 } else {
14808 /* Not enough queues for all TCs */
14809 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
14810 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
14811 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
14812 I40E_FLAG_DCB_ENABLED);
14813 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
14816 /* limit lan qps to the smaller of qps, cpus or msix */
14817 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
14818 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
14819 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
14820 pf->num_lan_qps = q_max;
14822 queues_left -= pf->num_lan_qps;
14825 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
14826 if (queues_left > 1) {
14827 queues_left -= 1; /* save 1 queue for FD */
14828 } else {
14829 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
14830 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14831 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
14835 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
14836 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
14837 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
14838 (queues_left / pf->num_vf_qps));
14839 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
14842 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
14843 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
14844 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
14845 (queues_left / pf->num_vmdq_qps));
14846 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
14849 pf->queues_left = queues_left;
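/* A hedged worked example (numbers are illustrative, not from the
 * datasheet): with 16 online CPUs, rss_size_max = 64, 128 Tx queue
 * pairs and 129 MSI-X vectors, the clamp above yields
 * num_lan_qps = min(max(64, 16), 128, 129) = 64, with the remainder
 * of queues_left consumed by the FD_SB, VF, and VMDq carve-outs
 * handled just above.
 */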
14850 dev_dbg(&pf->pdev->dev,
14851 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
14852 pf->hw.func_caps.num_tx_qp,
14853 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
14854 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
14855 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
14856 queues_left);
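/* Illustrative sketch (hypothetical helper, not wired into the driver):
 * the LAN queue clamp above in isolation, using the same min_t()/max_t()
 * helpers. With assumed capabilities rss_size_max = 64, 8 online CPUs,
 * num_tx_qp = 128 and num_msix_vectors = 129, the result is
 * min(min(max(64, 8), 128), 129) = 64 LAN queue pairs.
 */
static inline int i40e_example_lan_qp_clamp(int rss_size_max, int ncpus,
					    int num_tx_qp, int num_msix)
{
	int q_max = max_t(int, rss_size_max, ncpus);

	q_max = min_t(int, q_max, num_tx_qp);
	return min_t(int, q_max, num_msix);
}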
14860 * i40e_setup_pf_filter_control - Setup PF static filter control
14861 * @pf: PF to be set up
14863 * i40e_setup_pf_filter_control sets up a PF's initial filter control
14864 * settings. If PE/FCoE are enabled then it will also set the per-PF
14865 * filter sizes required for them. It also enables Flow Director,
14866 * ethertype and macvlan type filter settings for the PF.
14868 * Returns 0 on success, negative on failure
14870 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
14872 struct i40e_filter_control_settings *settings = &pf->filter_settings;
14874 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
14876 /* Flow Director is enabled */
14877 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
14878 settings->enable_fdir = true;
14880 /* Ethtype and MACVLAN filters enabled for PF */
14881 settings->enable_ethtype = true;
14882 settings->enable_macvlan = true;
14884 if (i40e_set_filter_control(&pf->hw, settings))
14885 return -ENOENT;
14887 return 0;
14890 #define INFO_STRING_LEN 255
14891 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
14892 static void i40e_print_features(struct i40e_pf *pf)
14894 struct i40e_hw *hw = &pf->hw;
14898 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
14899 if (!buf)
14900 return;
14902 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
14903 #ifdef CONFIG_PCI_IOV
14904 i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
14905 #endif
14906 i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
14907 pf->hw.func_caps.num_vsis,
14908 pf->vsi[pf->lan_vsi]->num_queue_pairs);
14909 if (pf->flags & I40E_FLAG_RSS_ENABLED)
14910 i += scnprintf(&buf[i], REMAIN(i), " RSS");
14911 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
14912 i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
14913 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
14914 i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
14915 i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
14916 }
14917 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
14918 i += scnprintf(&buf[i], REMAIN(i), " DCB");
14919 i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
14920 i += scnprintf(&buf[i], REMAIN(i), " Geneve");
14921 if (pf->flags & I40E_FLAG_PTP)
14922 i += scnprintf(&buf[i], REMAIN(i), " PTP");
14923 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
14924 i += scnprintf(&buf[i], REMAIN(i), " VEB");
14925 else
14926 i += scnprintf(&buf[i], REMAIN(i), " VEPA");
14928 dev_info(&pf->pdev->dev, "%s\n", buf);
14929 kfree(buf);
14930 WARN_ON(i > INFO_STRING_LEN);
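/* Illustrative sketch (hypothetical helper, not driver code): why the
 * scnprintf()/REMAIN() pattern above cannot overrun the buffer.
 * scnprintf() returns the number of characters actually written,
 * excluding the trailing NUL and never more than the size passed in,
 * so the running index can reach at most INFO_STRING_LEN - 1 and
 * REMAIN() stays positive.
 */
static inline int i40e_example_append_feature(char *buf, int used,
					      const char *feature)
{
	return used + scnprintf(&buf[used], REMAIN(used), " %s", feature);
}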
14934 * i40e_get_platform_mac_addr - get platform-specific MAC address
14935 * @pdev: PCI device information struct
14936 * @pf: board private structure
14938 * Look up the MAC address for the device. First we'll try
14939 * eth_platform_get_mac_address, which will check Open Firmware or an
14940 * arch-specific fallback. Otherwise, we'll default to the stored value
14941 * in firmware.
14943 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
14945 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
14946 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
14950 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
14951 * @fec_cfg: FEC option to set in flags
14952 * @flags: ptr to flags in which we set FEC option
14954 void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
14956 if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
14957 *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
14958 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
14959 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
14960 *flags |= I40E_FLAG_RS_FEC;
14961 *flags &= ~I40E_FLAG_BASE_R_FEC;
14962 }
14963 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
14964 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
14965 *flags |= I40E_FLAG_BASE_R_FEC;
14966 *flags &= ~I40E_FLAG_RS_FEC;
14967 }
14968 if (fec_cfg == 0)
14969 *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
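/* Illustrative sketch (hypothetical helper, not driver code): the pure
 * mapping implemented above. AUTO selects both FEC flags, RS and KR
 * requests/abilities select exactly one, and a zero config clears both.
 */
static inline u32 i40e_example_fec_flags(u8 fec_cfg)
{
	u32 flags = 0;

	i40e_set_fec_in_flags(fec_cfg, &flags);
	return flags; /* e.g. I40E_AQ_SET_FEC_AUTO -> RS_FEC | BASE_R_FEC */
}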
14973 * i40e_check_recovery_mode - check if we are running transition firmware
14974 * @pf: board private structure
14976 * Check registers indicating the firmware runs in recovery mode. Sets the
14977 * appropriate driver state.
14979 * Returns true if the recovery mode was detected, false otherwise
14981 static bool i40e_check_recovery_mode(struct i40e_pf *pf)
14983 u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
14985 if (val & I40E_GL_FWSTS_FWS1B_MASK) {
14986 dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
14987 dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
14988 set_bit(__I40E_RECOVERY_MODE, pf->state);
14990 return true;
14991 }
14992 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
14993 dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");
14995 return false;
14999 * i40e_pf_loop_reset - perform reset in a loop.
15000 * @pf: board private structure
15002 * This function is useful when a NIC is about to enter recovery mode.
15003 * When a NIC's internal data structures are corrupted the NIC's
15004 * firmware is going to enter recovery mode.
15005 * Right after a POR it takes about 7 minutes for the firmware to enter
15006 * recovery mode. Until that time a NIC is in some kind of intermediate
15007 * state. After that time period the NIC almost surely enters
15008 * recovery mode. The only way for a driver to detect the intermediate
15009 * state is to issue a series of PF resets and check the return values.
15010 * If a PF reset returns success then the firmware could be in recovery
15011 * mode, so the caller of this code needs to check for recovery mode
15012 * if this function returns success. There is a small chance that the
15013 * firmware will hang in the intermediate state forever.
15014 * Since waiting 7 minutes is quite a long time, this function waits
15015 * 10 seconds and then gives up by returning an error.
15017 * Return 0 on success, negative on failure.
15019 static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
15021 /* wait max 10 seconds for PF reset to succeed */
15022 const unsigned long time_end = jiffies + 10 * HZ;
15024 struct i40e_hw *hw = &pf->hw;
15025 i40e_status ret;
15027 ret = i40e_pf_reset(hw);
15028 while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
15029 usleep_range(10000, 20000);
15030 ret = i40e_pf_reset(hw);
15033 if (ret == I40E_SUCCESS)
15034 pf->pfr_count++;
15035 else
15036 dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
15038 return ret;
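/* Illustrative sketch (hypothetical helper, not driver code): the
 * bounded retry pattern used above, generalized. time_before() compares
 * jiffies with wraparound handled, so the deadline stays correct even
 * across a jiffies rollover.
 */
static inline int i40e_example_retry_op(int (*op)(struct i40e_hw *hw),
					struct i40e_hw *hw,
					unsigned long timeout)
{
	const unsigned long time_end = jiffies + timeout;
	int ret = op(hw);

	while (ret && time_before(jiffies, time_end)) {
		usleep_range(10000, 20000);
		ret = op(hw);
	}
	return ret;
}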
15042 * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
15043 * @pf: board private structure
15045 * Check FW registers to determine if FW issued unexpected EMP Reset.
15046 * Every time an unexpected EMP Reset occurs, the FW increments a
15047 * counter of unexpected EMP Resets. When the counter reaches 10,
15048 * the FW should enter Recovery mode.
15050 * Returns true if FW issued unexpected EMP Reset
15052 static bool i40e_check_fw_empr(struct i40e_pf *pf)
15054 const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
15055 I40E_GL_FWSTS_FWS1B_MASK;
15056 return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
15057 (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
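/* Worked example (assuming the EMPR status codes are consecutive, as
 * the range check above implies): a FWS1B field of
 * I40E_GL_FWSTS_FWS1B_EMPR_0 means no unexpected EMP Reset has been
 * counted yet and the check returns false; any value up to and
 * including I40E_GL_FWSTS_FWS1B_EMPR_10 returns true.
 */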
15061 * i40e_handle_resets - handle EMP resets and PF resets
15062 * @pf: board private structure
15064 * Handle both EMP resets and PF resets and conclude whether there are
15065 * any issues regarding these resets. If there are any issues then
15066 * generate a log entry.
15068 * Return 0 if the NIC is healthy, or a negative value when there are issues.
15071 static i40e_status i40e_handle_resets(struct i40e_pf *pf)
15073 const i40e_status pfr = i40e_pf_loop_reset(pf);
15074 const bool is_empr = i40e_check_fw_empr(pf);
15076 if (is_empr || pfr != I40E_SUCCESS)
15077 dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
15079 return is_empr ? I40E_ERR_RESET_FAILED : pfr;
15083 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
15084 * @pf: board private structure
15085 * @hw: ptr to the hardware info
15087 * This function does a minimal setup of all subsystems needed for running
15088 * recovery mode.
15090 * Returns 0 on success, negative on failure
15092 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
15094 struct i40e_vsi *vsi;
15095 int err;
15096 int v_idx;
15098 pci_save_state(pf->pdev);
15100 /* set up periodic task facility */
15101 timer_setup(&pf->service_timer, i40e_service_timer, 0);
15102 pf->service_timer_period = HZ;
15104 INIT_WORK(&pf->service_task, i40e_service_task);
15105 clear_bit(__I40E_SERVICE_SCHED, pf->state);
15107 err = i40e_init_interrupt_scheme(pf);
15108 if (err)
15109 goto err_switch_setup;
15111 /* The number of VSIs reported by the FW is the minimum guaranteed
15112 * to us; HW supports far more and we share the remaining pool with
15113 * the other PFs. We allocate space for more than the guarantee with
15114 * the understanding that we might not get them all later.
15116 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15117 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15118 else
15119 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15121 /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
15122 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15123 GFP_KERNEL);
15124 if (!pf->vsi) {
15125 err = -ENOMEM;
15126 goto err_switch_setup;
15127 }
15129 /* We allocate one VSI, which is the absolute minimum needed
15130 * in order to register the netdev
15131 */
15132 v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
15133 if (v_idx < 0)
15134 goto err_switch_setup;
15135 pf->lan_vsi = v_idx;
15136 vsi = pf->vsi[v_idx];
15137 if (!vsi)
15138 goto err_switch_setup;
15139 vsi->alloc_queue_pairs = 1;
15140 err = i40e_config_netdev(vsi);
15141 if (err)
15142 goto err_switch_setup;
15143 err = register_netdev(vsi->netdev);
15144 if (err)
15145 goto err_switch_setup;
15146 vsi->netdev_registered = true;
15147 i40e_dbg_pf_init(pf);
15149 err = i40e_setup_misc_vector_for_recovery_mode(pf);
15150 if (err)
15151 goto err_switch_setup;
15153 /* tell the firmware that we're starting */
15154 i40e_send_version(pf);
15156 /* since everything's happy, start the service_task timer */
15157 mod_timer(&pf->service_timer,
15158 round_jiffies(jiffies + pf->service_timer_period));
15160 return 0;
15162 err_switch_setup:
15163 i40e_reset_interrupt_capability(pf);
15164 del_timer_sync(&pf->service_timer);
15165 i40e_shutdown_adminq(hw);
15166 iounmap(hw->hw_addr);
15167 pci_disable_pcie_error_reporting(pf->pdev);
15168 pci_release_mem_regions(pf->pdev);
15169 pci_disable_device(pf->pdev);
15171 return err;
15176 * i40e_probe - Device initialization routine
15177 * @pdev: PCI device information struct
15178 * @ent: entry in i40e_pci_tbl
15180 * i40e_probe initializes a PF identified by a pci_dev structure.
15181 * The OS initialization, configuring of the PF private structure,
15182 * and a hardware reset occur.
15184 * Returns 0 on success, negative on failure
15186 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
15188 struct i40e_aq_get_phy_abilities_resp abilities;
15189 #ifdef CONFIG_I40E_DCB
15190 enum i40e_get_fw_lldp_status_resp lldp_status;
15191 i40e_status status;
15192 #endif /* CONFIG_I40E_DCB */
15193 struct i40e_pf *pf;
15194 struct i40e_hw *hw;
15195 static u16 pfs_found;
15196 u16 wol_nvm_bits;
15197 u16 link_status;
15198 int err;
15199 u32 val;
15200 u32 i;
15201 u8 set_fc_aq_fail;
15203 err = pci_enable_device_mem(pdev);
15204 if (err)
15205 return err;
15207 /* set up for high or low dma */
15208 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
15209 if (err) {
15210 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
15211 if (err) {
15212 dev_err(&pdev->dev,
15213 "DMA configuration failed: 0x%x\n", err);
15214 goto err_dma;
15215 }
15216 }
15218 /* set up pci connections */
15219 err = pci_request_mem_regions(pdev, i40e_driver_name);
15220 if (err) {
15221 dev_info(&pdev->dev,
15222 "pci_request_selected_regions failed %d\n", err);
15223 goto err_pci_reg;
15224 }
15226 pci_enable_pcie_error_reporting(pdev);
15227 pci_set_master(pdev);
15229 /* Now that we have a PCI connection, we need to do the
15230 * low level device setup. This is primarily setting up
15231 * the Admin Queue structures and then querying for the
15232 * device's current profile information.
15234 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
15235 if (!pf) {
15236 err = -ENOMEM;
15237 goto err_pf_alloc;
15238 }
15239 pf->next_vsi = 0;
15240 pf->pdev = pdev;
15241 set_bit(__I40E_DOWN, pf->state);
15243 hw = &pf->hw;
15244 hw->back = pf;
15246 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
15247 I40E_MAX_CSR_SPACE);
15248 /* We believe that the highest register to read is
15249 * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size
15250 * is not less than that before mapping to prevent a
15251 * kernel panic.
15252 */
15253 if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
15254 dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
15255 pf->ioremap_len);
15256 err = -ENODEV;
15257 goto err_ioremap;
15258 }
15259 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
15260 if (!hw->hw_addr) {
15261 err = -EIO;
15262 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
15263 (unsigned int)pci_resource_start(pdev, 0),
15264 pf->ioremap_len, err);
15265 goto err_ioremap;
15266 }
15267 hw->vendor_id = pdev->vendor;
15268 hw->device_id = pdev->device;
15269 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
15270 hw->subsystem_vendor_id = pdev->subsystem_vendor;
15271 hw->subsystem_device_id = pdev->subsystem_device;
15272 hw->bus.device = PCI_SLOT(pdev->devfn);
15273 hw->bus.func = PCI_FUNC(pdev->devfn);
15274 hw->bus.bus_id = pdev->bus->number;
15275 pf->instance = pfs_found;
15277 /* Select something other than the 802.1ad ethertype for the
15278 * switch to use internally and drop on ingress.
15280 hw->switch_tag = 0xffff;
15281 hw->first_tag = ETH_P_8021AD;
15282 hw->second_tag = ETH_P_8021Q;
15284 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
15285 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
15286 INIT_LIST_HEAD(&pf->ddp_old_prof);
15288 /* set up the locks for the AQ, do this only once in probe
15289 * and destroy them only once in remove
15291 mutex_init(&hw->aq.asq_mutex);
15292 mutex_init(&hw->aq.arq_mutex);
15294 pf->msg_enable = netif_msg_init(debug,
15295 NETIF_MSG_DRV |
15296 NETIF_MSG_PROBE |
15297 NETIF_MSG_LINK);
15298 if (debug < -1)
15299 pf->hw.debug_mask = debug;
15301 /* do a special CORER for clearing PXE mode once at init */
15302 if (hw->revision_id == 0 &&
15303 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
15304 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
15305 i40e_flush(hw);
15306 msleep(200);
15307 pf->corer_count++;
15309 i40e_clear_pxe_mode(hw);
15310 }
15312 /* Reset here to make sure all is clean and to define PF 'n' */
15313 i40e_clear_hw(hw);
15315 err = i40e_set_mac_type(hw);
15316 if (err) {
15317 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15318 err);
15319 goto err_pf_reset;
15320 }
15322 err = i40e_handle_resets(pf);
15323 if (err)
15324 goto err_pf_reset;
15326 i40e_check_recovery_mode(pf);
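	/* Size the admin queues and their buffers here; i40e_init_adminq()
	 * below allocates the rings from these values.
	 */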
15328 hw->aq.num_arq_entries = I40E_AQ_LEN;
15329 hw->aq.num_asq_entries = I40E_AQ_LEN;
15330 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15331 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15332 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
15334 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
15335 "%s-%s:misc",
15336 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
15338 err = i40e_init_shared_code(hw);
15339 if (err) {
15340 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15341 err);
15342 goto err_pf_reset;
15343 }
15345 /* set up a default setting for link flow control */
15346 pf->hw.fc.requested_mode = I40E_FC_NONE;
15348 err = i40e_init_adminq(hw);
15349 if (err) {
15350 if (err == I40E_ERR_FIRMWARE_API_VERSION)
15351 dev_info(&pdev->dev,
15352 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
15353 hw->aq.api_maj_ver,
15354 hw->aq.api_min_ver,
15355 I40E_FW_API_VERSION_MAJOR,
15356 I40E_FW_MINOR_VERSION(hw));
15357 else
15358 dev_info(&pdev->dev,
15359 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
15360 goto err_pf_reset;
15361 }
15363 i40e_get_oem_version(hw);
15365 /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
15366 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
15367 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
15368 hw->aq.api_maj_ver, hw->aq.api_min_ver,
15369 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
15370 hw->subsystem_vendor_id, hw->subsystem_device_id);
15372 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
15373 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
15374 dev_info(&pdev->dev,
15375 "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
15376 hw->aq.api_maj_ver,
15377 hw->aq.api_min_ver,
15378 I40E_FW_API_VERSION_MAJOR,
15379 I40E_FW_MINOR_VERSION(hw));
15380 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
15381 dev_info(&pdev->dev,
15382 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
15383 hw->aq.api_maj_ver,
15384 hw->aq.api_min_ver,
15385 I40E_FW_API_VERSION_MAJOR,
15386 I40E_FW_MINOR_VERSION(hw));
15388 i40e_verify_eeprom(pf);
15390 /* Rev 0 hardware was never productized */
15391 if (hw->revision_id < 1)
15392 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
15394 i40e_clear_pxe_mode(hw);
15396 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
15397 if (err)
15398 goto err_adminq_setup;
15400 err = i40e_sw_init(pf);
15401 if (err) {
15402 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
15403 goto err_sw_init;
15404 }
15406 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
15407 return i40e_init_recovery_mode(pf, hw);
15409 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
15410 hw->func_caps.num_rx_qp, 0, 0);
15411 if (err) {
15412 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
15413 goto err_init_lan_hmc;
15414 }
15416 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
15417 if (err) {
15418 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
15419 err = -ENOENT;
15420 goto err_configure_lan_hmc;
15421 }
15423 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
15424 * Ignore error return codes because this will fail if LLDP was
15425 * already disabled via the hardware settings.
15426 */
15427 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
15428 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
15429 i40e_aq_stop_lldp(hw, true, false, NULL);
15432 /* allow a platform config to override the HW addr */
15433 i40e_get_platform_mac_addr(pdev, pf);
15435 if (!is_valid_ether_addr(hw->mac.addr)) {
15436 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
15437 err = -EIO;
15438 goto err_mac_addr;
15439 }
15440 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
15441 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
15442 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
15443 if (is_valid_ether_addr(hw->mac.port_addr))
15444 pf->hw_features |= I40E_HW_PORT_ID_VALID;
15446 pci_set_drvdata(pdev, pf);
15447 pci_save_state(pdev);
15449 #ifdef CONFIG_I40E_DCB
15450 status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
15451 (!status &&
15452 lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED) ?
15453 (pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP) :
15454 (pf->flags |= I40E_FLAG_DISABLE_FW_LLDP);
15455 dev_info(&pdev->dev,
15456 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
15457 "FW LLDP is disabled\n" :
15458 "FW LLDP is enabled\n");
15460 /* Enable FW to write default DCB config on link-up */
15461 i40e_aq_set_dcb_parameters(hw, true, NULL);
15463 err = i40e_init_pf_dcb(pf);
15464 if (err) {
15465 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
15466 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
15467 /* Continue without DCB enabled */
15468 }
15469 #endif /* CONFIG_I40E_DCB */
15471 /* set up periodic task facility */
15472 timer_setup(&pf->service_timer, i40e_service_timer, 0);
15473 pf->service_timer_period = HZ;
15475 INIT_WORK(&pf->service_task, i40e_service_task);
15476 clear_bit(__I40E_SERVICE_SCHED, pf->state);
15478 /* NVM bit on means WoL disabled for the port */
15479 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
15480 if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
15481 pf->wol_en = false;
15482 else
15483 pf->wol_en = true;
15484 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
15486 /* set up the main switch operations */
15487 i40e_determine_queue_usage(pf);
15488 err = i40e_init_interrupt_scheme(pf);
15489 if (err)
15490 goto err_switch_setup;
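	/* Advertise the UDP tunnel (VXLAN/GENEVE) port table to the core;
	 * the set_port/unset_port callbacks program the HW port filters
	 * when the stack adds or removes tunnel ports.
	 */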
15492 pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
15493 pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
15494 pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
15495 pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
15496 pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
15497 pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
15498 UDP_TUNNEL_TYPE_GENEVE;
15500 /* The number of VSIs reported by the FW is the minimum guaranteed
15501 * to us; HW supports far more and we share the remaining pool with
15502 * the other PFs. We allocate space for more than the guarantee with
15503 * the understanding that we might not get them all later.
15505 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15506 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15507 else
15508 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15509 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
15510 dev_warn(&pf->pdev->dev,
15511 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
15512 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
15513 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
15516 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
15517 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15518 GFP_KERNEL);
15519 if (!pf->vsi) {
15520 err = -ENOMEM;
15521 goto err_switch_setup;
15522 }
15524 #ifdef CONFIG_PCI_IOV
15525 /* prep for VF support */
15526 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15527 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15528 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15529 if (pci_num_vf(pdev))
15530 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
15531 }
15532 #endif
15533 err = i40e_setup_pf_switch(pf, false);
15534 if (err) {
15535 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
15536 goto err_vsis;
15537 }
15538 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
15540 /* Make sure flow control is set according to current settings */
15541 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
15542 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
15543 dev_dbg(&pf->pdev->dev,
15544 "Set fc with err %s aq_err %s on get_phy_cap\n",
15545 i40e_stat_str(hw, err),
15546 i40e_aq_str(hw, hw->aq.asq_last_status));
15547 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
15548 dev_dbg(&pf->pdev->dev,
15549 "Set fc with err %s aq_err %s on set_phy_config\n",
15550 i40e_stat_str(hw, err),
15551 i40e_aq_str(hw, hw->aq.asq_last_status));
15552 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
15553 dev_dbg(&pf->pdev->dev,
15554 "Set fc with err %s aq_err %s on get_link_info\n",
15555 i40e_stat_str(hw, err),
15556 i40e_aq_str(hw, hw->aq.asq_last_status));
15558 /* if FDIR VSI was set up, start it now */
15559 for (i = 0; i < pf->num_alloc_vsi; i++) {
15560 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
15561 i40e_vsi_open(pf->vsi[i]);
15562 break;
15563 }
15564 }
15566 /* The driver only wants link up/down and module qualification
15567 * reports from firmware. Note the negative logic.
15569 err = i40e_aq_set_phy_int_mask(&pf->hw,
15570 ~(I40E_AQ_EVENT_LINK_UPDOWN |
15571 I40E_AQ_EVENT_MEDIA_NA |
15572 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
15573 if (err)
15574 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
15575 i40e_stat_str(&pf->hw, err),
15576 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15578 /* Reconfigure the hardware to allow a smaller MSS in the case
15579 * of TSO, so that we avoid the MDD being fired and causing
15580 * a reset in the case of small MSS+TSO.
15581 */
15582 val = rd32(hw, I40E_REG_MSS);
15583 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
15584 val &= ~I40E_REG_MSS_MIN_MASK;
15585 val |= I40E_64BYTE_MSS;
15586 wr32(hw, I40E_REG_MSS, val);
15587 }
15589 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
15590 msleep(75);
15591 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
15592 if (err)
15593 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
15594 i40e_stat_str(&pf->hw, err),
15595 i40e_aq_str(&pf->hw,
15596 pf->hw.aq.asq_last_status));
15597 }
15598 /* The main driver is (mostly) up and happy. We need to set this state
15599 * before setting up the misc vector or we get a race and the vector
15600 * ends up disabled forever.
15602 clear_bit(__I40E_DOWN, pf->state);
15604 /* In case of MSI-X we are going to set up the misc vector right here
15605 * to handle admin queue events etc. In case of legacy and MSI
15606 * the misc functionality and queue processing is combined in
15607 * the same vector and that gets set up at open.
15608 */
15609 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
15610 err = i40e_setup_misc_vector(pf);
15611 if (err) {
15612 dev_info(&pdev->dev,
15613 "setup of misc vector failed: %d\n", err);
15614 goto err_vsis;
15615 }
15616 }
15618 #ifdef CONFIG_PCI_IOV
15619 /* prep for VF support */
15620 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15621 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15622 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15623 /* disable link interrupts for VFs */
15624 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
15625 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
15626 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
15629 if (pci_num_vf(pdev)) {
15630 dev_info(&pdev->dev,
15631 "Active VFs found, allocating resources.\n");
15632 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
15633 if (err)
15634 dev_info(&pdev->dev,
15635 "Error %d allocating resources for existing VFs\n",
15636 err);
15637 }
15638 }
15639 #endif /* CONFIG_PCI_IOV */
15641 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15642 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
15643 pf->num_iwarp_msix,
15644 I40E_IWARP_IRQ_PILE_ID);
15645 if (pf->iwarp_base_vector < 0) {
15646 dev_info(&pdev->dev,
15647 "failed to get tracking for %d vectors for IWARP err=%d\n",
15648 pf->num_iwarp_msix, pf->iwarp_base_vector);
15649 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
15650 }
15651 }
15653 i40e_dbg_pf_init(pf);
15655 /* tell the firmware that we're starting */
15656 i40e_send_version(pf);
15658 /* since everything's happy, start the service_task timer */
15659 mod_timer(&pf->service_timer,
15660 round_jiffies(jiffies + pf->service_timer_period));
15662 /* add this PF to client device list and launch a client service task */
15663 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15664 err = i40e_lan_add_device(pf);
15665 if (err)
15666 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
15667 err);
15668 }
15670 #define PCI_SPEED_SIZE 8
15671 #define PCI_WIDTH_SIZE 8
15672 /* Devices on the IOSF bus do not have this information
15673 * and will report PCI Gen 1 x 1 by default so don't bother
15674 * checking them.
15675 */
15676 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
15677 char speed[PCI_SPEED_SIZE] = "Unknown";
15678 char width[PCI_WIDTH_SIZE] = "Unknown";
15680 /* Get the negotiated link width and speed from PCI config
15681 * space.
15682 */
15683 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
15684 &link_status);
15686 i40e_set_pci_config_data(hw, link_status);
15688 switch (hw->bus.speed) {
15689 case i40e_bus_speed_8000:
15690 strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
15691 case i40e_bus_speed_5000:
15692 strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
15693 case i40e_bus_speed_2500:
15694 strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
15695 default:
15696 break;
15697 }
15698 switch (hw->bus.width) {
15699 case i40e_bus_width_pcie_x8:
15700 strlcpy(width, "8", PCI_WIDTH_SIZE); break;
15701 case i40e_bus_width_pcie_x4:
15702 strlcpy(width, "4", PCI_WIDTH_SIZE); break;
15703 case i40e_bus_width_pcie_x2:
15704 strlcpy(width, "2", PCI_WIDTH_SIZE); break;
15705 case i40e_bus_width_pcie_x1:
15706 strlcpy(width, "1", PCI_WIDTH_SIZE); break;
15707 default:
15708 break;
15709 }
15711 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
15712 speed, width);
15714 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
15715 hw->bus.speed < i40e_bus_speed_8000) {
15716 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
15717 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
15718 }
15719 }
15721 /* get the requested speeds from the fw */
15722 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
15723 if (err)
15724 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
15725 i40e_stat_str(&pf->hw, err),
15726 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15727 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
15729 /* set the FEC config due to the board capabilities */
15730 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
15732 /* get the supported phy types from the fw */
15733 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
15734 if (err)
15735 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
15736 i40e_stat_str(&pf->hw, err),
15737 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15739 /* make sure the MFS hasn't been set lower than the default */
15740 #define MAX_FRAME_SIZE_DEFAULT 0x2600
15741 val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
15742 I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
15743 if (val < MAX_FRAME_SIZE_DEFAULT)
15744 dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
15747 /* Add a filter to drop all Flow control frames from any VSI from being
15748 * transmitted. By doing so we stop a malicious VF from sending out
15749 * PAUSE or PFC frames and potentially controlling traffic for other
15750 * PF/VF VSIs.
15751 * The FW can still send Flow control frames if enabled.
15752 */
15753 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
15754 pf->main_vsi_seid);
15755 #ifdef CONFIG_I40E_DCB
15756 if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
15757 i40e_set_lldp_forwarding(pf, true);
15758 #endif /* CONFIG_I40E_DCB */
15760 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
15761 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
15762 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
15763 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
15764 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
15765 /* print a string summarizing features */
15766 i40e_print_features(pf);
15768 return 0;
15770 /* Unwind what we've done if something failed in the setup */
15771 err_vsis:
15772 set_bit(__I40E_DOWN, pf->state);
15773 i40e_clear_interrupt_scheme(pf);
15774 kfree(pf->vsi);
15775 err_switch_setup:
15776 i40e_reset_interrupt_capability(pf);
15777 del_timer_sync(&pf->service_timer);
15779 err_configure_lan_hmc:
15780 (void)i40e_shutdown_lan_hmc(hw);
15781 err_init_lan_hmc:
15782 kfree(pf->qp_pile);
15783 err_sw_init:
15784 err_adminq_setup:
15785 err_pf_reset:
15786 iounmap(hw->hw_addr);
15787 err_ioremap:
15788 kfree(pf);
15789 err_pf_alloc:
15790 pci_disable_pcie_error_reporting(pdev);
15791 pci_release_mem_regions(pdev);
15792 err_pci_reg:
15793 err_dma:
15794 pci_disable_device(pdev);
15795 return err;
15799 * i40e_remove - Device removal routine
15800 * @pdev: PCI device information struct
15802 * i40e_remove is called by the PCI subsystem to alert the driver
15803 * that it should release a PCI device. This could be caused by a
15804 * Hot-Plug event, or because the driver is going to be removed from
15805 * memory.
15807 static void i40e_remove(struct pci_dev *pdev)
15809 struct i40e_pf *pf = pci_get_drvdata(pdev);
15810 struct i40e_hw *hw = &pf->hw;
15811 i40e_status ret_code;
15812 int i;
15814 i40e_dbg_pf_exit(pf);
15816 i40e_ptp_stop(pf);
15818 /* Disable RSS in hw */
15819 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
15820 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
15822 while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
15823 usleep_range(1000, 2000);
15825 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
15826 set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
15827 i40e_free_vfs(pf);
15828 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
15829 }
15830 /* no more scheduling of any task */
15831 set_bit(__I40E_SUSPENDED, pf->state);
15832 set_bit(__I40E_DOWN, pf->state);
15833 if (pf->service_timer.function)
15834 del_timer_sync(&pf->service_timer);
15835 if (pf->service_task.func)
15836 cancel_work_sync(&pf->service_task);
15838 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
15839 struct i40e_vsi *vsi = pf->vsi[0];
15841 /* We know that we have allocated only one vsi for this PF,
15842 * and it was just for registering the netdevice, so the
15843 * interface could be visible in the 'ifconfig' output
15844 */
15845 unregister_netdev(vsi->netdev);
15846 free_netdev(vsi->netdev);
15848 goto unmap;
15849 }
15851 /* Client close must be called explicitly here because the timer
15852 * has been stopped.
15854 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
15856 i40e_fdir_teardown(pf);
15858 /* If there is a switch structure or any orphans, remove them.
15859 * This will leave only the PF's VSI remaining.
15861 for (i = 0; i < I40E_MAX_VEB; i++) {
15862 if (!pf->veb[i])
15863 continue;
15865 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
15866 pf->veb[i]->uplink_seid == 0)
15867 i40e_switch_branch_release(pf->veb[i]);
15868 }
15870 /* Now we can shut down the PF's VSI, just before we kill
15871 * adminq and hmc.
15872 */
15873 if (pf->vsi[pf->lan_vsi])
15874 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
15876 i40e_cloud_filter_exit(pf);
15878 /* remove attached clients */
15879 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15880 ret_code = i40e_lan_del_device(pf);
15881 if (ret_code)
15882 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
15883 ret_code);
15884 }
15886 /* shutdown and destroy the HMC */
15887 if (hw->hmc.hmc_obj) {
15888 ret_code = i40e_shutdown_lan_hmc(hw);
15889 if (ret_code)
15890 dev_warn(&pdev->dev,
15891 "Failed to destroy the HMC resources: %d\n",
15892 ret_code);
15893 }
15895 unmap:
15896 /* Free MSI/legacy interrupt 0 when in recovery mode. */
15897 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
15898 !(pf->flags & I40E_FLAG_MSIX_ENABLED))
15899 free_irq(pf->pdev->irq, pf);
15901 /* shutdown the adminq */
15902 i40e_shutdown_adminq(hw);
15904 /* destroy the locks only once, here */
15905 mutex_destroy(&hw->aq.arq_mutex);
15906 mutex_destroy(&hw->aq.asq_mutex);
15908 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
15909 rtnl_lock();
15910 i40e_clear_interrupt_scheme(pf);
15911 for (i = 0; i < pf->num_alloc_vsi; i++) {
15912 if (pf->vsi[i]) {
15913 if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
15914 i40e_vsi_clear_rings(pf->vsi[i]);
15915 i40e_vsi_clear(pf->vsi[i]);
15916 pf->vsi[i] = NULL;
15917 }
15918 }
15919 rtnl_unlock();
15921 for (i = 0; i < I40E_MAX_VEB; i++) {
15922 kfree(pf->veb[i]);
15923 pf->veb[i] = NULL;
15924 }
15926 kfree(pf->qp_pile);
15927 kfree(pf->vsi);
15929 iounmap(hw->hw_addr);
15930 kfree(pf);
15931 pci_release_mem_regions(pdev);
15933 pci_disable_pcie_error_reporting(pdev);
15934 pci_disable_device(pdev);
15938 * i40e_pci_error_detected - warning that something funky happened in PCI land
15939 * @pdev: PCI device information struct
15940 * @error: the type of PCI error
15942 * Called to warn that something happened and the error handling steps
15943 * are in progress. Allows the driver to quiesce things, be ready for
15944 * remapping of PCI BAR memory.
15946 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
15947 pci_channel_state_t error)
15949 struct i40e_pf *pf = pci_get_drvdata(pdev);
15951 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
15953 if (!pf) {
15954 dev_info(&pdev->dev,
15955 "Cannot recover - error happened during device probe\n");
15956 return PCI_ERS_RESULT_DISCONNECT;
15957 }
15959 /* shutdown all operations */
15960 if (!test_bit(__I40E_SUSPENDED, pf->state))
15961 i40e_prep_for_reset(pf);
15963 /* Request a slot reset */
15964 return PCI_ERS_RESULT_NEED_RESET;
15968 * i40e_pci_error_slot_reset - a PCI slot reset just happened
15969 * @pdev: PCI device information struct
15971 * Called to find if the driver can work with the device now that
15972 * the pci slot has been reset. If a basic connection seems good
15973 * (registers are readable and have sane content) then return a
15974 * happy little PCI_ERS_RESULT_xxx.
15976 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
15978 struct i40e_pf *pf = pci_get_drvdata(pdev);
15979 pci_ers_result_t result;
15980 u32 reg;
15982 dev_dbg(&pdev->dev, "%s\n", __func__);
15983 if (pci_enable_device_mem(pdev)) {
15984 dev_info(&pdev->dev,
15985 "Cannot re-enable PCI device after reset.\n");
15986 result = PCI_ERS_RESULT_DISCONNECT;
15987 } else {
15988 pci_set_master(pdev);
15989 pci_restore_state(pdev);
15990 pci_save_state(pdev);
15991 pci_wake_from_d3(pdev, false);
15993 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
15994 if (reg == 0)
15995 result = PCI_ERS_RESULT_RECOVERED;
15996 else
15997 result = PCI_ERS_RESULT_DISCONNECT;
15998 }
16000 return result;
16004 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
16005 * @pdev: PCI device information struct
16007 static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
16009 struct i40e_pf *pf = pci_get_drvdata(pdev);
16011 i40e_prep_for_reset(pf);
16015 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
16016 * @pdev: PCI device information struct
16018 static void i40e_pci_error_reset_done(struct pci_dev *pdev)
16020 struct i40e_pf *pf = pci_get_drvdata(pdev);
16022 i40e_reset_and_rebuild(pf, false, false);
16026 * i40e_pci_error_resume - restart operations after PCI error recovery
16027 * @pdev: PCI device information struct
16029 * Called to allow the driver to bring things back up after PCI error
16030 * and/or reset recovery has finished.
16032 static void i40e_pci_error_resume(struct pci_dev *pdev)
16034 struct i40e_pf *pf = pci_get_drvdata(pdev);
16036 dev_dbg(&pdev->dev, "%s\n", __func__);
16037 if (test_bit(__I40E_SUSPENDED, pf->state))
16038 return;
16040 i40e_handle_reset_warning(pf, false);
16044 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
16045 * using the mac_address_write admin q function
16046 * @pf: pointer to i40e_pf struct
16048 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
16050 struct i40e_hw *hw = &pf->hw;
16051 i40e_status ret;
16052 u8 mac_addr[6];
16053 u16 flags = 0;
16055 /* Get current MAC address in case it's an LAA */
16056 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
16057 ether_addr_copy(mac_addr,
16058 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
16059 } else {
16060 dev_err(&pf->pdev->dev,
16061 "Failed to retrieve MAC address; using default\n");
16062 ether_addr_copy(mac_addr, hw->mac.addr);
16063 }
16065 /* The FW expects the mac address write cmd to first be called with
16066 * one of these flags before calling it again with the multicast
16067 * enable flags.
16068 */
16069 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
16071 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
16072 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
16074 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
16075 if (ret) {
16076 dev_err(&pf->pdev->dev,
16077 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
16078 return;
16079 }
16081 flags = I40E_AQC_MC_MAG_EN
16082 | I40E_AQC_WOL_PRESERVE_ON_PFR
16083 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
16084 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
16085 if (ret)
16086 dev_err(&pf->pdev->dev,
16087 "Failed to enable Multicast Magic Packet wake up\n");
16091 * i40e_shutdown - PCI callback for shutting down
16092 * @pdev: PCI device information struct
16094 static void i40e_shutdown(struct pci_dev *pdev)
16096 struct i40e_pf *pf = pci_get_drvdata(pdev);
16097 struct i40e_hw *hw = &pf->hw;
16099 set_bit(__I40E_SUSPENDED, pf->state);
16100 set_bit(__I40E_DOWN, pf->state);
16102 del_timer_sync(&pf->service_timer);
16103 cancel_work_sync(&pf->service_task);
16104 i40e_cloud_filter_exit(pf);
16105 i40e_fdir_teardown(pf);
16107 /* Client close must be called explicitly here because the timer
16108 * has been stopped.
16110 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16112 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
16113 i40e_enable_mc_magic_wake(pf);
16115 i40e_prep_for_reset(pf);
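	/* Arm (or disarm) the APM wake-up and the magic-packet wake filter
	 * according to the configured WoL setting before powering down.
	 */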
16117 wr32(hw, I40E_PFPM_APM,
16118 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
16119 wr32(hw, I40E_PFPM_WUFC,
16120 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
16122 /* Free MSI/legacy interrupt 0 when in recovery mode. */
16123 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
16124 !(pf->flags & I40E_FLAG_MSIX_ENABLED))
16125 free_irq(pf->pdev->irq, pf);
16127 /* Since we're going to destroy queues during the
16128 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
16129 * flow.
16130 */
16131 rtnl_lock();
16132 i40e_clear_interrupt_scheme(pf);
16133 rtnl_unlock();
16135 if (system_state == SYSTEM_POWER_OFF) {
16136 pci_wake_from_d3(pdev, pf->wol_en);
16137 pci_set_power_state(pdev, PCI_D3hot);
16138 }
16142 * i40e_suspend - PM callback for moving to D3
16143 * @dev: generic device information structure
16145 static int __maybe_unused i40e_suspend(struct device *dev)
16147 struct i40e_pf *pf = dev_get_drvdata(dev);
16148 struct i40e_hw *hw = &pf->hw;
16150 /* If we're already suspended, then there is nothing to do */
16151 if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
16152 return 0;
16154 set_bit(__I40E_DOWN, pf->state);
16156 /* Ensure service task will not be running */
16157 del_timer_sync(&pf->service_timer);
16158 cancel_work_sync(&pf->service_task);
16160 /* Client close must be called explicitly here because the timer
16161 * has been stopped.
16163 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16165 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
16166 i40e_enable_mc_magic_wake(pf);
16168 /* Since we're going to destroy queues during the
16169 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
16170 * flow.
16171 */
16172 rtnl_lock();
16174 i40e_prep_for_reset(pf);
16176 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
16177 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
16179 /* Clear the interrupt scheme and release our IRQs so that the system
16180 * can safely hibernate even when there are a large number of CPUs.
16181 * Otherwise hibernation might fail when mapping all the vectors back
16182 * to CPU0.
16183 */
16184 i40e_clear_interrupt_scheme(pf);
16185 rtnl_unlock();
16187 return 0;
16192 * i40e_resume - PM callback for waking up from D3
16193 * @dev: generic device information structure
16195 static int __maybe_unused i40e_resume(struct device *dev)
16197 struct i40e_pf *pf = dev_get_drvdata(dev);
16198 int err;
16200 /* If we're not suspended, then there is nothing to do */
16201 if (!test_bit(__I40E_SUSPENDED, pf->state))
16202 return 0;
16204 /* We need to hold the RTNL lock prior to restoring interrupt schemes,
16205 * since we're going to be restoring queues
16206 */
16207 rtnl_lock();
16209 /* We cleared the interrupt scheme when we suspended, so we need to
16210 * restore it now to resume device functionality.
16212 err = i40e_restore_interrupt_scheme(pf);
16213 if (err) {
16214 dev_err(dev, "Cannot restore interrupt scheme: %d\n",
16215 err);
16216 }
16218 clear_bit(__I40E_DOWN, pf->state);
16219 i40e_reset_and_rebuild(pf, false, true);
16221 rtnl_unlock();
16223 /* Clear suspended state last after everything is recovered */
16224 clear_bit(__I40E_SUSPENDED, pf->state);
16226 /* Restart the service task */
16227 mod_timer(&pf->service_timer,
16228 round_jiffies(jiffies + pf->service_timer_period));
16230 return 0;
16233 static const struct pci_error_handlers i40e_err_handler = {
16234 .error_detected = i40e_pci_error_detected,
16235 .slot_reset = i40e_pci_error_slot_reset,
16236 .reset_prepare = i40e_pci_error_reset_prepare,
16237 .reset_done = i40e_pci_error_reset_done,
16238 .resume = i40e_pci_error_resume,
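/* SIMPLE_DEV_PM_OPS() below wires i40e_suspend/i40e_resume into the
 * system sleep transitions (suspend, hibernate and their resume
 * counterparts).
 */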
16241 static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
16243 static struct pci_driver i40e_driver = {
16244 .name = i40e_driver_name,
16245 .id_table = i40e_pci_tbl,
16246 .probe = i40e_probe,
16247 .remove = i40e_remove,
16249 .pm = &i40e_pm_ops,
16251 .shutdown = i40e_shutdown,
16252 .err_handler = &i40e_err_handler,
16253 .sriov_configure = i40e_pci_sriov_configure,
16257 * i40e_init_module - Driver registration routine
16259 * i40e_init_module is the first routine called when the driver is
16260 * loaded. All it does is register with the PCI subsystem.
16262 static int __init i40e_init_module(void)
16264 pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
16265 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
16267 /* There is no need to throttle the number of active tasks because
16268 * each device limits its own task using a state bit for scheduling
16269 * the service task, and the device tasks do not interfere with each
16270 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
16271 * since we need to be able to guarantee forward progress even under
16272 * memory pressure.
16273 */
16274 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
16275 if (!i40e_wq) {
16276 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
16277 return -ENOMEM;
16278 }
16280 i40e_dbg_init();
16281 return pci_register_driver(&i40e_driver);
16283 module_init(i40e_init_module);
16286 * i40e_exit_module - Driver exit cleanup routine
16288 * i40e_exit_module is called just before the driver is removed
16289 * from memory.
16291 static void __exit i40e_exit_module(void)
16293 pci_unregister_driver(&i40e_driver);
16294 destroy_workqueue(i40e_wq);
16295 i40e_dbg_exit();
16297 module_exit(i40e_exit_module);