1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2021 Intel Corporation. */
4 #include <generated/utsrelease.h>
5 #include <linux/crash_dump.h>
6 #include <linux/if_bridge.h>
7 #include <linux/if_macvlan.h>
8 #include <linux/module.h>
9 #include <net/pkt_cls.h>
10 #include <net/xdp_sock_drv.h>
14 #include "i40e_devids.h"
15 #include "i40e_diag.h"
16 #include "i40e_lan_hmc.h"
17 #include "i40e_virtchnl_pf.h"
20 /* All i40e tracepoints are defined by the include below, which
21 * must be included exactly once across the whole kernel with
22 * CREATE_TRACE_POINTS defined
24 #define CREATE_TRACE_POINTS
25 #include "i40e_trace.h"
27 const char i40e_driver_name[] = "i40e";
28 static const char i40e_driver_string[] =
29 "Intel(R) Ethernet Connection XL710 Network Driver";
31 static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
33 /* forward declarations */
34 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
35 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
36 static int i40e_add_vsi(struct i40e_vsi *vsi);
37 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
38 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
39 static int i40e_setup_misc_vector(struct i40e_pf *pf);
40 static void i40e_determine_queue_usage(struct i40e_pf *pf);
41 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
42 static void i40e_prep_for_reset(struct i40e_pf *pf);
43 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
45 static int i40e_reset(struct i40e_pf *pf);
46 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
47 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
48 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
49 static bool i40e_check_recovery_mode(struct i40e_pf *pf);
50 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
51 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
52 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
53 static int i40e_get_capabilities(struct i40e_pf *pf,
54 enum i40e_admin_queue_opc list_type);
55 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
57 /* i40e_pci_tbl - PCI Device ID Table
59 * Last entry must be all 0s
61 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
62 * Class, Class Mask, private data (not used) }
64 static const struct pci_device_id i40e_pci_tbl[] = {
65 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
66 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
67 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
68 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_BC), 0},
73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
77 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
78 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
79 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
80 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
81 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
82 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
83 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
84 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
85 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
86 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
87 {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
88 {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
89 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
90 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
91 /* required last entry */
94 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
96 #define I40E_MAX_VF_COUNT 128
97 static int debug = -1;
98 module_param(debug, uint, 0);
99 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
101 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
102 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
103 MODULE_LICENSE("GPL v2");
105 static struct workqueue_struct *i40e_wq;
107 static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
108 struct net_device *netdev, int delta)
110 struct netdev_hw_addr_list *ha_list;
111 struct netdev_hw_addr *ha;
116 if (is_unicast_ether_addr(f->macaddr) || is_link_local_ether_addr(f->macaddr))
117 ha_list = &netdev->uc;
119 ha_list = &netdev->mc;
121 netdev_hw_addr_list_for_each(ha, ha_list) {
122 if (ether_addr_equal(ha->addr, f->macaddr)) {
123 ha->refcount += delta;
124 if (ha->refcount <= 0)
132 * i40e_hw_to_dev - get device pointer from the hardware structure
133 * @hw: pointer to the device HW structure
135 struct device *i40e_hw_to_dev(struct i40e_hw *hw)
137 struct i40e_pf *pf = i40e_hw_to_pf(hw);
139 return &pf->pdev->dev;
143 * i40e_allocate_dma_mem - OS specific memory alloc for shared code
144 * @hw: pointer to the HW structure
145 * @mem: ptr to mem struct to fill out
146 * @size: size of memory requested
147 * @alignment: what to align the allocation to
149 int i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
150 u64 size, u32 alignment)
152 struct i40e_pf *pf = i40e_hw_to_pf(hw);
154 mem->size = ALIGN(size, alignment);
155 mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
164 * i40e_free_dma_mem - OS specific memory free for shared code
165 * @hw: pointer to the HW structure
166 * @mem: ptr to mem struct to free
168 int i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
170 struct i40e_pf *pf = i40e_hw_to_pf(hw);
172 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
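/* Illustrative pairing of the two DMA helpers above -- a sketch only, not
 * code from this driver. It assumes i40e_allocate_dma_mem() returns 0 on
 * success and fills mem->va (CPU address) and mem->pa (bus address), which
 * matches the dma_alloc_coherent() call above:
 *
 *	struct i40e_dma_mem ring_mem = {};
 *
 *	if (!i40e_allocate_dma_mem(hw, &ring_mem, 4096, 4096)) {
 *		program ring_mem.pa into the hardware, access the buffer
 *		through ring_mem.va from the CPU, and when done release it:
 *		i40e_free_dma_mem(hw, &ring_mem);
 *	}
 */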
181 * i40e_allocate_virt_mem - OS specific memory alloc for shared code
182 * @hw: pointer to the HW structure
183 * @mem: ptr to mem struct to fill out
184 * @size: size of memory requested
186 int i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem,
190 mem->va = kzalloc(size, GFP_KERNEL);
199 * i40e_free_virt_mem - OS specific memory free for shared code
200 * @hw: pointer to the HW structure
201 * @mem: ptr to mem struct to free
203 int i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
205 /* it's ok to kfree a NULL pointer */
214 * i40e_get_lump - find a lump of free generic resource
215 * @pf: board private structure
216 * @pile: the pile of resource to search
217 * @needed: the number of items needed
218 * @id: an owner id to stick on the items assigned
220 * Returns the base item index of the lump, or negative for error
222 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
228 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
229 dev_info(&pf->pdev->dev,
230 "param err: pile=%s needed=%d id=0x%04x\n",
231 pile ? "<valid>" : "<null>", needed, id);
235 /* Allocate the last queue in the pile for the FDIR VSI
236 * so it doesn't fragment the qp_pile
238 if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
239 if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
240 dev_err(&pf->pdev->dev,
241 "Cannot allocate queue %d for I40E_VSI_FDIR\n",
242 pile->num_entries - 1);
245 pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
246 return pile->num_entries - 1;
250 while (i < pile->num_entries) {
251 /* skip already allocated entries */
252 if (pile->list[i] & I40E_PILE_VALID_BIT) {
257 /* do we have enough in this lump? */
258 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
259 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
264 /* there was enough, so assign it to the requestor */
265 for (j = 0; j < needed; j++)
266 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
271 /* not enough, so skip over it and continue looking */
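/* A sketch of how the pile tracking above behaves (illustrative values):
 * each pile->list[] entry has I40E_PILE_VALID_BIT clear while free and holds
 * the owner id OR'ed with I40E_PILE_VALID_BIT once allocated. So if
 *
 *	base = i40e_get_lump(pf, pf->qp_pile, 3, 5);
 *
 * succeeds with base == 8, then list[8], list[9] and list[10] each hold
 * (5 | I40E_PILE_VALID_BIT), and a later i40e_put_lump(pf->qp_pile, 8, 5)
 * is expected to release exactly those three entries and return 3.
 */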
279 * i40e_put_lump - return a lump of generic resource
280 * @pile: the pile of resource to search
281 * @index: the base item index
282 * @id: the owner id of the items assigned
284 * Returns the count of items in the lump
286 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
288 int valid_id = (id | I40E_PILE_VALID_BIT);
292 if (!pile || index >= pile->num_entries)
296 i < pile->num_entries && pile->list[i] == valid_id;
307 * i40e_find_vsi_from_id - searches for the vsi with the given id
308 * @pf: the pf structure to search for the vsi
309 * @id: id of the vsi it is searching for
311 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
313 struct i40e_vsi *vsi;
316 i40e_pf_for_each_vsi(pf, i, vsi)
324 * i40e_service_event_schedule - Schedule the service task to wake up
325 * @pf: board private structure
327 * If not already scheduled, this puts the task into the work queue
329 void i40e_service_event_schedule(struct i40e_pf *pf)
331 if ((!test_bit(__I40E_DOWN, pf->state) &&
332 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
333 test_bit(__I40E_RECOVERY_MODE, pf->state))
334 queue_work(i40e_wq, &pf->service_task);
338 * i40e_tx_timeout - Respond to a Tx Hang
339 * @netdev: network interface device structure
340 * @txqueue: queue number timing out
342 * If any port has noticed a Tx timeout, it is likely that the whole
343 * device is munged, not just the one netdev port, so go for the full reset.
346 static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
348 struct i40e_netdev_priv *np = netdev_priv(netdev);
349 struct i40e_vsi *vsi = np->vsi;
350 struct i40e_pf *pf = vsi->back;
351 struct i40e_ring *tx_ring = NULL;
355 pf->tx_timeout_count++;
357 /* with txqueue index, find the tx_ring struct */
358 for (i = 0; i < vsi->num_queue_pairs; i++) {
359 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
361 vsi->tx_rings[i]->queue_index) {
362 tx_ring = vsi->tx_rings[i];
368 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
369 pf->tx_timeout_recovery_level = 1; /* reset after some time */
370 else if (time_before(jiffies,
371 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
372 return; /* don't do any new action before the next timeout */
374 /* don't kick off another recovery if one is already pending */
375 if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
379 head = i40e_get_head(tx_ring);
380 /* Read interrupt register */
381 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
383 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
384 tx_ring->vsi->base_vector - 1));
386 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
388 netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
389 vsi->seid, txqueue, tx_ring->next_to_clean,
390 head, tx_ring->next_to_use,
391 readl(tx_ring->tail), val);
394 pf->tx_timeout_last_recovery = jiffies;
395 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
396 pf->tx_timeout_recovery_level, txqueue);
398 switch (pf->tx_timeout_recovery_level) {
400 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
403 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
406 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
409 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
410 set_bit(__I40E_DOWN_REQUESTED, pf->state);
411 set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
415 i40e_service_event_schedule(pf);
416 pf->tx_timeout_recovery_level++;
420 * i40e_get_vsi_stats_struct - Get System Network Statistics
421 * @vsi: the VSI we care about
423 * Returns the address of the device statistics structure.
424 * The statistics are actually updated from the service task.
426 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
428 return &vsi->net_stats;
432 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
433 * @ring: Tx ring to get statistics from
434 * @stats: statistics entry to be updated
436 static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
437 struct rtnl_link_stats64 *stats)
443 start = u64_stats_fetch_begin(&ring->syncp);
444 packets = ring->stats.packets;
445 bytes = ring->stats.bytes;
446 } while (u64_stats_fetch_retry(&ring->syncp, start));
448 stats->tx_packets += packets;
449 stats->tx_bytes += bytes;
453 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
454 * @netdev: network interface device structure
455 * @stats: data structure to store statistics
457 * Returns the address of the device statistics structure.
458 * The statistics are actually updated from the service task.
460 static void i40e_get_netdev_stats_struct(struct net_device *netdev,
461 struct rtnl_link_stats64 *stats)
463 struct i40e_netdev_priv *np = netdev_priv(netdev);
464 struct i40e_vsi *vsi = np->vsi;
465 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
466 struct i40e_ring *ring;
469 if (test_bit(__I40E_VSI_DOWN, vsi->state))
476 for (i = 0; i < vsi->num_queue_pairs; i++) {
480 ring = READ_ONCE(vsi->tx_rings[i]);
483 i40e_get_netdev_stats_struct_tx(ring, stats);
485 if (i40e_enabled_xdp_vsi(vsi)) {
486 ring = READ_ONCE(vsi->xdp_rings[i]);
489 i40e_get_netdev_stats_struct_tx(ring, stats);
492 ring = READ_ONCE(vsi->rx_rings[i]);
496 start = u64_stats_fetch_begin(&ring->syncp);
497 packets = ring->stats.packets;
498 bytes = ring->stats.bytes;
499 } while (u64_stats_fetch_retry(&ring->syncp, start));
501 stats->rx_packets += packets;
502 stats->rx_bytes += bytes;
507 /* following stats updated by i40e_watchdog_subtask() */
508 stats->multicast = vsi_stats->multicast;
509 stats->tx_errors = vsi_stats->tx_errors;
510 stats->tx_dropped = vsi_stats->tx_dropped;
511 stats->rx_errors = vsi_stats->rx_errors;
512 stats->rx_dropped = vsi_stats->rx_dropped;
513 stats->rx_missed_errors = vsi_stats->rx_missed_errors;
514 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
515 stats->rx_length_errors = vsi_stats->rx_length_errors;
519 * i40e_vsi_reset_stats - Resets all stats of the given vsi
520 * @vsi: the VSI to have its stats reset
522 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
524 struct rtnl_link_stats64 *ns;
530 ns = i40e_get_vsi_stats_struct(vsi);
531 memset(ns, 0, sizeof(*ns));
532 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
533 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
534 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
535 if (vsi->rx_rings && vsi->rx_rings[0]) {
536 for (i = 0; i < vsi->num_queue_pairs; i++) {
537 memset(&vsi->rx_rings[i]->stats, 0,
538 sizeof(vsi->rx_rings[i]->stats));
539 memset(&vsi->rx_rings[i]->rx_stats, 0,
540 sizeof(vsi->rx_rings[i]->rx_stats));
541 memset(&vsi->tx_rings[i]->stats, 0,
542 sizeof(vsi->tx_rings[i]->stats));
543 memset(&vsi->tx_rings[i]->tx_stats, 0,
544 sizeof(vsi->tx_rings[i]->tx_stats));
547 vsi->stat_offsets_loaded = false;
551 * i40e_pf_reset_stats - Reset all of the stats for the given PF
552 * @pf: the PF to be reset
554 void i40e_pf_reset_stats(struct i40e_pf *pf)
556 struct i40e_veb *veb;
559 memset(&pf->stats, 0, sizeof(pf->stats));
560 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
561 pf->stat_offsets_loaded = false;
563 i40e_pf_for_each_veb(pf, i, veb) {
564 memset(&veb->stats, 0, sizeof(veb->stats));
565 memset(&veb->stats_offsets, 0, sizeof(veb->stats_offsets));
566 memset(&veb->tc_stats, 0, sizeof(veb->tc_stats));
567 memset(&veb->tc_stats_offsets, 0, sizeof(veb->tc_stats_offsets));
568 veb->stat_offsets_loaded = false;
570 pf->hw_csum_rx_error = 0;
574 * i40e_compute_pci_to_hw_id - compute index from PCI function.
575 * @vsi: ptr to the VSI to read from.
576 * @hw: ptr to the hardware info.
578 static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw)
580 int pf_count = i40e_get_pf_count(hw);
582 if (vsi->type == I40E_VSI_SRIOV)
583 return (hw->port * BIT(7)) / pf_count + vsi->vf_id;
585 return hw->port + BIT(7);
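/* Worked example of the mapping above (hypothetical values): with four PFs
 * reported by i40e_get_pf_count(), hw->port == 1 and vf_id == 3, an SR-IOV
 * VSI maps to (1 * 128) / 4 + 3 == 35, while a non-SR-IOV VSI on the same
 * port maps to 1 + 128 == 129.
 */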
589 * i40e_stat_update64 - read and update a 64 bit stat from the chip.
590 * @hw: ptr to the hardware info.
591 * @hireg: the high 32 bit reg to read.
592 * @loreg: the low 32 bit reg to read.
593 * @offset_loaded: has the initial offset been loaded yet.
594 * @offset: ptr to current offset value.
595 * @stat: ptr to the stat.
597 * Since the device stats are not reset at PFReset, they will not
598 * be zeroed when the driver starts. We'll save the first values read
599 * and use them as offsets to be subtracted from the raw values in order
600 * to report stats that count from zero.
602 static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg,
603 bool offset_loaded, u64 *offset, u64 *stat)
607 new_data = rd64(hw, loreg);
609 if (!offset_loaded || new_data < *offset)
611 *stat = new_data - *offset;
615 * i40e_stat_update48 - read and update a 48 bit stat from the chip
616 * @hw: ptr to the hardware info
617 * @hireg: the high 32 bit reg to read
618 * @loreg: the low 32 bit reg to read
619 * @offset_loaded: has the initial offset been loaded yet
620 * @offset: ptr to current offset value
621 * @stat: ptr to the stat
623 * Since the device stats are not reset at PFReset, they likely will not
624 * be zeroed when the driver starts. We'll save the first values read
625 * and use them as offsets to be subtracted from the raw values in order
626 * to report stats that count from zero. In the process, we also manage
627 * the potential roll-over.
629 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
630 bool offset_loaded, u64 *offset, u64 *stat)
634 if (hw->device_id == I40E_DEV_ID_QEMU) {
635 new_data = rd32(hw, loreg);
636 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
638 new_data = rd64(hw, loreg);
642 if (likely(new_data >= *offset))
643 *stat = new_data - *offset;
645 *stat = (new_data + BIT_ULL(48)) - *offset;
646 *stat &= 0xFFFFFFFFFFFFULL;
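/* Worked example of the 48-bit roll-over handling above (made-up numbers):
 * if *offset was latched at 0xFFFFFFFFF000 and the counter has since wrapped
 * so that new_data reads 0x000000000100, then new_data < *offset, and
 *
 *	*stat = (0x000000000100 + BIT_ULL(48)) - 0xFFFFFFFFF000 = 0x1100
 *
 * which is the 0x1000 counts up to the wrap plus the 0x100 counts after it,
 * kept within 48 bits by the final mask.
 */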
650 * i40e_stat_update32 - read and update a 32 bit stat from the chip
651 * @hw: ptr to the hardware info
652 * @reg: the hw reg to read
653 * @offset_loaded: has the initial offset been loaded yet
654 * @offset: ptr to current offset value
655 * @stat: ptr to the stat
657 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
658 bool offset_loaded, u64 *offset, u64 *stat)
662 new_data = rd32(hw, reg);
665 if (likely(new_data >= *offset))
666 *stat = (u32)(new_data - *offset);
668 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
672 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
673 * @hw: ptr to the hardware info
674 * @reg: the hw reg to read and clear
675 * @stat: ptr to the stat
677 static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
679 u32 new_data = rd32(hw, reg);
681 wr32(hw, reg, 1); /* must write a nonzero value to clear register */
686 * i40e_stats_update_rx_discards - update rx_discards.
687 * @vsi: ptr to the VSI to be updated.
688 * @hw: ptr to the hardware info.
689 * @stat_idx: VSI's stat_counter_idx.
690 * @offset_loaded: ptr to the VSI's stat_offsets_loaded.
691 * @stat_offset: ptr to stat_offset to store first read of specific register.
692 * @stat: ptr to VSI's stat to be updated.
695 i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
696 int stat_idx, bool offset_loaded,
697 struct i40e_eth_stats *stat_offset,
698 struct i40e_eth_stats *stat)
700 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
701 &stat_offset->rx_discards, &stat->rx_discards);
702 i40e_stat_update64(hw,
703 I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
704 I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
705 offset_loaded, &stat_offset->rx_discards_other,
706 &stat->rx_discards_other);
710 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
711 * @vsi: the VSI to be updated
713 void i40e_update_eth_stats(struct i40e_vsi *vsi)
715 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
716 struct i40e_pf *pf = vsi->back;
717 struct i40e_hw *hw = &pf->hw;
718 struct i40e_eth_stats *oes;
719 struct i40e_eth_stats *es; /* device's eth stats */
721 es = &vsi->eth_stats;
722 oes = &vsi->eth_stats_offsets;
724 /* Gather up the stats that the hw collects */
725 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
726 vsi->stat_offsets_loaded,
727 &oes->tx_errors, &es->tx_errors);
728 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
729 vsi->stat_offsets_loaded,
730 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
732 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
733 I40E_GLV_GORCL(stat_idx),
734 vsi->stat_offsets_loaded,
735 &oes->rx_bytes, &es->rx_bytes);
736 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
737 I40E_GLV_UPRCL(stat_idx),
738 vsi->stat_offsets_loaded,
739 &oes->rx_unicast, &es->rx_unicast);
740 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
741 I40E_GLV_MPRCL(stat_idx),
742 vsi->stat_offsets_loaded,
743 &oes->rx_multicast, &es->rx_multicast);
744 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
745 I40E_GLV_BPRCL(stat_idx),
746 vsi->stat_offsets_loaded,
747 &oes->rx_broadcast, &es->rx_broadcast);
749 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
750 I40E_GLV_GOTCL(stat_idx),
751 vsi->stat_offsets_loaded,
752 &oes->tx_bytes, &es->tx_bytes);
753 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
754 I40E_GLV_UPTCL(stat_idx),
755 vsi->stat_offsets_loaded,
756 &oes->tx_unicast, &es->tx_unicast);
757 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
758 I40E_GLV_MPTCL(stat_idx),
759 vsi->stat_offsets_loaded,
760 &oes->tx_multicast, &es->tx_multicast);
761 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
762 I40E_GLV_BPTCL(stat_idx),
763 vsi->stat_offsets_loaded,
764 &oes->tx_broadcast, &es->tx_broadcast);
766 i40e_stats_update_rx_discards(vsi, hw, stat_idx,
767 vsi->stat_offsets_loaded, oes, es);
769 vsi->stat_offsets_loaded = true;
773 * i40e_update_veb_stats - Update Switch component statistics
774 * @veb: the VEB being updated
776 void i40e_update_veb_stats(struct i40e_veb *veb)
778 struct i40e_pf *pf = veb->pf;
779 struct i40e_hw *hw = &pf->hw;
780 struct i40e_eth_stats *oes;
781 struct i40e_eth_stats *es; /* device's eth stats */
782 struct i40e_veb_tc_stats *veb_oes;
783 struct i40e_veb_tc_stats *veb_es;
786 idx = veb->stats_idx;
788 oes = &veb->stats_offsets;
789 veb_es = &veb->tc_stats;
790 veb_oes = &veb->tc_stats_offsets;
792 /* Gather up the stats that the hw collects */
793 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
794 veb->stat_offsets_loaded,
795 &oes->tx_discards, &es->tx_discards);
796 if (hw->revision_id > 0)
797 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
798 veb->stat_offsets_loaded,
799 &oes->rx_unknown_protocol,
800 &es->rx_unknown_protocol);
801 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
802 veb->stat_offsets_loaded,
803 &oes->rx_bytes, &es->rx_bytes);
804 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
805 veb->stat_offsets_loaded,
806 &oes->rx_unicast, &es->rx_unicast);
807 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
808 veb->stat_offsets_loaded,
809 &oes->rx_multicast, &es->rx_multicast);
810 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
811 veb->stat_offsets_loaded,
812 &oes->rx_broadcast, &es->rx_broadcast);
814 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
815 veb->stat_offsets_loaded,
816 &oes->tx_bytes, &es->tx_bytes);
817 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
818 veb->stat_offsets_loaded,
819 &oes->tx_unicast, &es->tx_unicast);
820 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
821 veb->stat_offsets_loaded,
822 &oes->tx_multicast, &es->tx_multicast);
823 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
824 veb->stat_offsets_loaded,
825 &oes->tx_broadcast, &es->tx_broadcast);
826 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
827 i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
828 I40E_GLVEBTC_RPCL(i, idx),
829 veb->stat_offsets_loaded,
830 &veb_oes->tc_rx_packets[i],
831 &veb_es->tc_rx_packets[i]);
832 i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
833 I40E_GLVEBTC_RBCL(i, idx),
834 veb->stat_offsets_loaded,
835 &veb_oes->tc_rx_bytes[i],
836 &veb_es->tc_rx_bytes[i]);
837 i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
838 I40E_GLVEBTC_TPCL(i, idx),
839 veb->stat_offsets_loaded,
840 &veb_oes->tc_tx_packets[i],
841 &veb_es->tc_tx_packets[i]);
842 i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
843 I40E_GLVEBTC_TBCL(i, idx),
844 veb->stat_offsets_loaded,
845 &veb_oes->tc_tx_bytes[i],
846 &veb_es->tc_tx_bytes[i]);
848 veb->stat_offsets_loaded = true;
852 * i40e_update_vsi_stats - Update the vsi statistics counters.
853 * @vsi: the VSI to be updated
855 * There are a few instances where we store the same stat in a
856 * couple of different structs. This is partly because we have
857 * the netdev stats that need to be filled out, which is slightly
858 * different from the "eth_stats" defined by the chip and used in
859 * VF communications. We sort it out here.
861 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
863 u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
864 struct i40e_pf *pf = vsi->back;
865 struct rtnl_link_stats64 *ons;
866 struct rtnl_link_stats64 *ns; /* netdev stats */
867 struct i40e_eth_stats *oes;
868 struct i40e_eth_stats *es; /* device's eth stats */
869 u64 tx_restart, tx_busy;
880 if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
881 test_bit(__I40E_CONFIG_BUSY, pf->state))
884 ns = i40e_get_vsi_stats_struct(vsi);
885 ons = &vsi->net_stats_offsets;
886 es = &vsi->eth_stats;
887 oes = &vsi->eth_stats_offsets;
889 /* Gather up the netdev and vsi stats that the driver collects
890 * on the fly during packet processing
894 tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
903 for (q = 0; q < vsi->num_queue_pairs; q++) {
905 p = READ_ONCE(vsi->tx_rings[q]);
910 start = u64_stats_fetch_begin(&p->syncp);
911 packets = p->stats.packets;
912 bytes = p->stats.bytes;
913 } while (u64_stats_fetch_retry(&p->syncp, start));
916 tx_restart += p->tx_stats.restart_queue;
917 tx_busy += p->tx_stats.tx_busy;
918 tx_linearize += p->tx_stats.tx_linearize;
919 tx_force_wb += p->tx_stats.tx_force_wb;
920 tx_stopped += p->tx_stats.tx_stopped;
923 p = READ_ONCE(vsi->rx_rings[q]);
928 start = u64_stats_fetch_begin(&p->syncp);
929 packets = p->stats.packets;
930 bytes = p->stats.bytes;
931 } while (u64_stats_fetch_retry(&p->syncp, start));
934 rx_buf += p->rx_stats.alloc_buff_failed;
935 rx_page += p->rx_stats.alloc_page_failed;
936 rx_reuse += p->rx_stats.page_reuse_count;
937 rx_alloc += p->rx_stats.page_alloc_count;
938 rx_waive += p->rx_stats.page_waive_count;
939 rx_busy += p->rx_stats.page_busy_count;
941 if (i40e_enabled_xdp_vsi(vsi)) {
942 /* locate XDP ring */
943 p = READ_ONCE(vsi->xdp_rings[q]);
948 start = u64_stats_fetch_begin(&p->syncp);
949 packets = p->stats.packets;
950 bytes = p->stats.bytes;
951 } while (u64_stats_fetch_retry(&p->syncp, start));
954 tx_restart += p->tx_stats.restart_queue;
955 tx_busy += p->tx_stats.tx_busy;
956 tx_linearize += p->tx_stats.tx_linearize;
957 tx_force_wb += p->tx_stats.tx_force_wb;
961 vsi->tx_restart = tx_restart;
962 vsi->tx_busy = tx_busy;
963 vsi->tx_linearize = tx_linearize;
964 vsi->tx_force_wb = tx_force_wb;
965 vsi->tx_stopped = tx_stopped;
966 vsi->rx_page_failed = rx_page;
967 vsi->rx_buf_failed = rx_buf;
968 vsi->rx_page_reuse = rx_reuse;
969 vsi->rx_page_alloc = rx_alloc;
970 vsi->rx_page_waive = rx_waive;
971 vsi->rx_page_busy = rx_busy;
973 ns->rx_packets = rx_p;
975 ns->tx_packets = tx_p;
978 /* update netdev stats from eth stats */
979 i40e_update_eth_stats(vsi);
980 ons->tx_errors = oes->tx_errors;
981 ns->tx_errors = es->tx_errors;
982 ons->multicast = oes->rx_multicast;
983 ns->multicast = es->rx_multicast;
984 ons->rx_dropped = oes->rx_discards_other;
985 ns->rx_dropped = es->rx_discards_other;
986 ons->rx_missed_errors = oes->rx_discards;
987 ns->rx_missed_errors = es->rx_discards;
988 ons->tx_dropped = oes->tx_discards;
989 ns->tx_dropped = es->tx_discards;
991 /* pull in a couple PF stats if this is the main vsi */
992 if (vsi == pf->vsi[pf->lan_vsi]) {
993 ns->rx_crc_errors = pf->stats.crc_errors;
994 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
995 ns->rx_length_errors = pf->stats.rx_length_errors;
1000 * i40e_update_pf_stats - Update the PF statistics counters.
1001 * @pf: the PF to be updated
1003 static void i40e_update_pf_stats(struct i40e_pf *pf)
1005 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
1006 struct i40e_hw_port_stats *nsd = &pf->stats;
1007 struct i40e_hw *hw = &pf->hw;
1011 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
1012 I40E_GLPRT_GORCL(hw->port),
1013 pf->stat_offsets_loaded,
1014 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
1015 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
1016 I40E_GLPRT_GOTCL(hw->port),
1017 pf->stat_offsets_loaded,
1018 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
1019 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
1020 pf->stat_offsets_loaded,
1021 &osd->eth.rx_discards,
1022 &nsd->eth.rx_discards);
1023 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
1024 I40E_GLPRT_UPRCL(hw->port),
1025 pf->stat_offsets_loaded,
1026 &osd->eth.rx_unicast,
1027 &nsd->eth.rx_unicast);
1028 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
1029 I40E_GLPRT_MPRCL(hw->port),
1030 pf->stat_offsets_loaded,
1031 &osd->eth.rx_multicast,
1032 &nsd->eth.rx_multicast);
1033 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
1034 I40E_GLPRT_BPRCL(hw->port),
1035 pf->stat_offsets_loaded,
1036 &osd->eth.rx_broadcast,
1037 &nsd->eth.rx_broadcast);
1038 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
1039 I40E_GLPRT_UPTCL(hw->port),
1040 pf->stat_offsets_loaded,
1041 &osd->eth.tx_unicast,
1042 &nsd->eth.tx_unicast);
1043 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
1044 I40E_GLPRT_MPTCL(hw->port),
1045 pf->stat_offsets_loaded,
1046 &osd->eth.tx_multicast,
1047 &nsd->eth.tx_multicast);
1048 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
1049 I40E_GLPRT_BPTCL(hw->port),
1050 pf->stat_offsets_loaded,
1051 &osd->eth.tx_broadcast,
1052 &nsd->eth.tx_broadcast);
1054 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
1055 pf->stat_offsets_loaded,
1056 &osd->tx_dropped_link_down,
1057 &nsd->tx_dropped_link_down);
1059 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
1060 pf->stat_offsets_loaded,
1061 &osd->crc_errors, &nsd->crc_errors);
1063 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
1064 pf->stat_offsets_loaded,
1065 &osd->illegal_bytes, &nsd->illegal_bytes);
1067 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
1068 pf->stat_offsets_loaded,
1069 &osd->mac_local_faults,
1070 &nsd->mac_local_faults);
1071 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
1072 pf->stat_offsets_loaded,
1073 &osd->mac_remote_faults,
1074 &nsd->mac_remote_faults);
1076 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
1077 pf->stat_offsets_loaded,
1078 &osd->rx_length_errors,
1079 &nsd->rx_length_errors);
1081 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
1082 pf->stat_offsets_loaded,
1083 &osd->link_xon_rx, &nsd->link_xon_rx);
1084 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
1085 pf->stat_offsets_loaded,
1086 &osd->link_xon_tx, &nsd->link_xon_tx);
1087 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
1088 pf->stat_offsets_loaded,
1089 &osd->link_xoff_rx, &nsd->link_xoff_rx);
1090 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1091 pf->stat_offsets_loaded,
1092 &osd->link_xoff_tx, &nsd->link_xoff_tx);
1094 for (i = 0; i < 8; i++) {
1095 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1096 pf->stat_offsets_loaded,
1097 &osd->priority_xoff_rx[i],
1098 &nsd->priority_xoff_rx[i]);
1099 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1100 pf->stat_offsets_loaded,
1101 &osd->priority_xon_rx[i],
1102 &nsd->priority_xon_rx[i]);
1103 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1104 pf->stat_offsets_loaded,
1105 &osd->priority_xon_tx[i],
1106 &nsd->priority_xon_tx[i]);
1107 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1108 pf->stat_offsets_loaded,
1109 &osd->priority_xoff_tx[i],
1110 &nsd->priority_xoff_tx[i]);
1111 i40e_stat_update32(hw,
1112 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1113 pf->stat_offsets_loaded,
1114 &osd->priority_xon_2_xoff[i],
1115 &nsd->priority_xon_2_xoff[i]);
1118 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1119 I40E_GLPRT_PRC64L(hw->port),
1120 pf->stat_offsets_loaded,
1121 &osd->rx_size_64, &nsd->rx_size_64);
1122 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1123 I40E_GLPRT_PRC127L(hw->port),
1124 pf->stat_offsets_loaded,
1125 &osd->rx_size_127, &nsd->rx_size_127);
1126 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1127 I40E_GLPRT_PRC255L(hw->port),
1128 pf->stat_offsets_loaded,
1129 &osd->rx_size_255, &nsd->rx_size_255);
1130 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1131 I40E_GLPRT_PRC511L(hw->port),
1132 pf->stat_offsets_loaded,
1133 &osd->rx_size_511, &nsd->rx_size_511);
1134 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1135 I40E_GLPRT_PRC1023L(hw->port),
1136 pf->stat_offsets_loaded,
1137 &osd->rx_size_1023, &nsd->rx_size_1023);
1138 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1139 I40E_GLPRT_PRC1522L(hw->port),
1140 pf->stat_offsets_loaded,
1141 &osd->rx_size_1522, &nsd->rx_size_1522);
1142 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1143 I40E_GLPRT_PRC9522L(hw->port),
1144 pf->stat_offsets_loaded,
1145 &osd->rx_size_big, &nsd->rx_size_big);
1147 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1148 I40E_GLPRT_PTC64L(hw->port),
1149 pf->stat_offsets_loaded,
1150 &osd->tx_size_64, &nsd->tx_size_64);
1151 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1152 I40E_GLPRT_PTC127L(hw->port),
1153 pf->stat_offsets_loaded,
1154 &osd->tx_size_127, &nsd->tx_size_127);
1155 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1156 I40E_GLPRT_PTC255L(hw->port),
1157 pf->stat_offsets_loaded,
1158 &osd->tx_size_255, &nsd->tx_size_255);
1159 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1160 I40E_GLPRT_PTC511L(hw->port),
1161 pf->stat_offsets_loaded,
1162 &osd->tx_size_511, &nsd->tx_size_511);
1163 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1164 I40E_GLPRT_PTC1023L(hw->port),
1165 pf->stat_offsets_loaded,
1166 &osd->tx_size_1023, &nsd->tx_size_1023);
1167 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1168 I40E_GLPRT_PTC1522L(hw->port),
1169 pf->stat_offsets_loaded,
1170 &osd->tx_size_1522, &nsd->tx_size_1522);
1171 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1172 I40E_GLPRT_PTC9522L(hw->port),
1173 pf->stat_offsets_loaded,
1174 &osd->tx_size_big, &nsd->tx_size_big);
1176 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1177 pf->stat_offsets_loaded,
1178 &osd->rx_undersize, &nsd->rx_undersize);
1179 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1180 pf->stat_offsets_loaded,
1181 &osd->rx_fragments, &nsd->rx_fragments);
1182 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1183 pf->stat_offsets_loaded,
1184 &osd->rx_oversize, &nsd->rx_oversize);
1185 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1186 pf->stat_offsets_loaded,
1187 &osd->rx_jabber, &nsd->rx_jabber);
1190 i40e_stat_update_and_clear32(hw,
1191 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
1192 &nsd->fd_atr_match);
1193 i40e_stat_update_and_clear32(hw,
1194 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
1196 i40e_stat_update_and_clear32(hw,
1197 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
1198 &nsd->fd_atr_tunnel_match);
1200 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1201 nsd->tx_lpi_status =
1202 FIELD_GET(I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK, val);
1203 nsd->rx_lpi_status =
1204 FIELD_GET(I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK, val);
1205 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1206 pf->stat_offsets_loaded,
1207 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1208 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1209 pf->stat_offsets_loaded,
1210 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1212 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
1213 !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
1214 nsd->fd_sb_status = true;
1216 nsd->fd_sb_status = false;
1218 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
1219 !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
1220 nsd->fd_atr_status = true;
1222 nsd->fd_atr_status = false;
1224 pf->stat_offsets_loaded = true;
1228 * i40e_update_stats - Update the various statistics counters.
1229 * @vsi: the VSI to be updated
1231 * Update the various stats for this VSI and its related entities.
1233 void i40e_update_stats(struct i40e_vsi *vsi)
1235 struct i40e_pf *pf = vsi->back;
1237 if (vsi == pf->vsi[pf->lan_vsi])
1238 i40e_update_pf_stats(pf);
1240 i40e_update_vsi_stats(vsi);
1244 * i40e_count_filters - counts VSI mac filters
1245 * @vsi: the VSI to be searched
1247 * Returns count of mac filters
1249 int i40e_count_filters(struct i40e_vsi *vsi)
1251 struct i40e_mac_filter *f;
1252 struct hlist_node *h;
1256 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
1263 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1264 * @vsi: the VSI to be searched
1265 * @macaddr: the MAC address
1268 * Returns ptr to the filter object or NULL
1270 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1271 const u8 *macaddr, s16 vlan)
1273 struct i40e_mac_filter *f;
1276 if (!vsi || !macaddr)
1279 key = i40e_addr_to_hkey(macaddr);
1280 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1281 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1289 * i40e_find_mac - Find a mac addr in the macvlan filters list
1290 * @vsi: the VSI to be searched
1291 * @macaddr: the MAC address we are searching for
1293 * Returns the first filter with the provided MAC address or NULL if
1294 * MAC address was not found
1296 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1298 struct i40e_mac_filter *f;
1301 if (!vsi || !macaddr)
1304 key = i40e_addr_to_hkey(macaddr);
1305 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1306 if ((ether_addr_equal(macaddr, f->macaddr)))
1313 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1314 * @vsi: the VSI to be searched
1316 * Returns true if VSI is in vlan mode or false otherwise
1318 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1320 /* If we have a PVID, always operate in VLAN mode */
1324 /* We need to operate in VLAN mode whenever we have any filters with
1325 * a VLAN other than I40E_VLAN_ALL. We could check the table each
1326 * time, incurring search cost repeatedly. However, we can notice two things:
1329 * 1) the only place where we can gain a VLAN filter is in i40e_add_filter.
1332 * 2) the only place where filters are actually removed is in
1333 * i40e_sync_filters_subtask.
1335 * Thus, we can simply use a boolean value, has_vlan_filters which we
1336 * will set to true when we add a VLAN filter in i40e_add_filter. Then
1337 * we have to perform the full search after deleting filters in
1338 * i40e_sync_filters_subtask, but we already have to search
1339 * filters here and can perform the check at the same time. This
1340 * results in avoiding embedding a loop for VLAN mode inside another
1341 * loop over all the filters, and should maintain correctness as noted above.
1344 return vsi->has_vlan_filter;
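/* Illustrative sequence for the shortcut above (a sketch, not taken from
 * this file): adding a tagged filter with i40e_add_filter(vsi, mac, 100)
 * flips vsi->has_vlan_filter to true, so i40e_is_vsi_in_vlan() answers
 * without walking the hash table; once the last VLAN filter is deleted,
 * the correction helpers below recompute the flag from the live filter
 * count during the next filter sync.
 */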
1348 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
1349 * @vsi: the VSI to configure
1350 * @tmp_add_list: list of filters ready to be added
1351 * @tmp_del_list: list of filters ready to be deleted
1352 * @vlan_filters: the number of active VLAN filters
1354 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
1355 * behave as expected. If we have any active VLAN filters remaining or about
1356 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
1357 * so that they only match against untagged traffic. If we no longer have any
1358 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
1359 * so that they match against both tagged and untagged traffic. In this way,
1360 * we ensure that we correctly receive the desired traffic. This ensures that
1361 * when we have an active VLAN we will receive only untagged traffic and
1362 * traffic matching active VLANs. If we have no active VLANs then we will
1363 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
1365 * Finally, in a similar fashion, this function also corrects filters when
1366 * there is an active PVID assigned to this VSI.
1368 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1370 * This function is only expected to be called from within
1371 * i40e_sync_vsi_filters.
1373 * NOTE: This function expects to be called while under the
1374 * mac_filter_hash_lock
1376 static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
1377 struct hlist_head *tmp_add_list,
1378 struct hlist_head *tmp_del_list,
1381 s16 pvid = le16_to_cpu(vsi->info.pvid);
1382 struct i40e_mac_filter *f, *add_head;
1383 struct i40e_new_mac_filter *new;
1384 struct hlist_node *h;
1387 /* To determine if a particular filter needs to be replaced we
1388 * have the following three conditions:
1390 * a) if we have a PVID assigned, then all filters which are
1391 * not marked as VLAN=PVID must be replaced with filters that
 *    are marked as VLAN=PVID
1393 * b) otherwise, if we have any active VLANs, all filters
1394 * which are marked as VLAN=-1 must be replaced with
1395 * filters marked as VLAN=0
1396 * c) finally, if we do not have any active VLANs, all filters
1397 * which are marked as VLAN=0 must be replaced with filters
 *    marked as VLAN=-1
1401 /* Update the filters about to be added in place */
1402 hlist_for_each_entry(new, tmp_add_list, hlist) {
1403 if (pvid && new->f->vlan != pvid)
1404 new->f->vlan = pvid;
1405 else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
1407 else if (!vlan_filters && new->f->vlan == 0)
1408 new->f->vlan = I40E_VLAN_ANY;
1411 /* Update the remaining active filters */
1412 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1413 /* Combine the checks for whether a filter needs to be changed
1414 * and then determine the new VLAN inside the if block, in
1415 * order to avoid duplicating code for adding the new filter
1416 * then deleting the old filter.
1418 if ((pvid && f->vlan != pvid) ||
1419 (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1420 (!vlan_filters && f->vlan == 0)) {
1421 /* Determine the new vlan we will be adding */
1424 else if (vlan_filters)
1427 new_vlan = I40E_VLAN_ANY;
1429 /* Create the new filter */
1430 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1434 /* Create a temporary i40e_new_mac_filter */
1435 new = kzalloc(sizeof(*new), GFP_ATOMIC);
1440 new->state = add_head->state;
1442 /* Add the new filter to the tmp list */
1443 hlist_add_head(&new->hlist, tmp_add_list);
1445 /* Put the original filter into the delete list */
1446 f->state = I40E_FILTER_REMOVE;
1447 hash_del(&f->hlist);
1448 hlist_add_head(&f->hlist, tmp_del_list);
1452 vsi->has_vlan_filter = !!vlan_filters;
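/* A few concrete cases of the correction rules above (illustrative only):
 *
 *	PVID == 5:               a filter whose vlan differs from 5 is re-added as vlan == 5
 *	no PVID, vlan_filters:   a filter with vlan == -1 is re-added as vlan == 0
 *	no PVID, no filters:     a filter with vlan == 0  is re-added as vlan == -1
 *
 * In every case the original filter is moved to tmp_del_list and a
 * replacement carrying the corrected VLAN is queued on tmp_add_list.
 */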
1458 * i40e_get_vf_new_vlan - Get new vlan id on a vf
1459 * @vsi: the vsi to configure
1460 * @new_mac: new mac filter to be added
1461 * @f: existing mac filter, replaced with new_mac->f if new_mac is not NULL
1462 * @vlan_filters: the number of active VLAN filters
1463 * @trusted: flag if the VF is trusted
1465 * Get new VLAN id based on current VLAN filters, trust, PVID
1466 * and vf-vlan-prune-disable flag.
1468 * Returns the value of the new vlan filter or
1469 * the old value if no new filter is needed.
1471 static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi,
1472 struct i40e_new_mac_filter *new_mac,
1473 struct i40e_mac_filter *f,
1477 s16 pvid = le16_to_cpu(vsi->info.pvid);
1478 struct i40e_pf *pf = vsi->back;
1484 if (pvid && f->vlan != pvid)
1487 is_any = (trusted ||
1488 !test_bit(I40E_FLAG_VF_VLAN_PRUNING_ENA, pf->flags));
1490 if ((vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1491 (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1492 (is_any && !vlan_filters && f->vlan == 0)) {
1494 return I40E_VLAN_ANY;
1503 * i40e_correct_vf_mac_vlan_filters - Correct non-VLAN VF filters if necessary
1504 * @vsi: the vsi to configure
1505 * @tmp_add_list: list of filters ready to be added
1506 * @tmp_del_list: list of filters ready to be deleted
1507 * @vlan_filters: the number of active VLAN filters
1508 * @trusted: flag if the VF is trusted
1510 * Correct VF VLAN filters based on current VLAN filters, trust, PVID
1511 * and vf-vlan-prune-disable flag.
1513 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1515 * This function is only expected to be called from within
1516 * i40e_sync_vsi_filters.
1518 * NOTE: This function expects to be called while under the
1519 * mac_filter_hash_lock
1521 static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
1522 struct hlist_head *tmp_add_list,
1523 struct hlist_head *tmp_del_list,
1527 struct i40e_mac_filter *f, *add_head;
1528 struct i40e_new_mac_filter *new_mac;
1529 struct hlist_node *h;
1532 hlist_for_each_entry(new_mac, tmp_add_list, hlist) {
1533 new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL,
1534 vlan_filters, trusted);
1537 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1538 new_vlan = i40e_get_vf_new_vlan(vsi, NULL, f, vlan_filters,
1540 if (new_vlan != f->vlan) {
1541 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1544 /* Create a temporary i40e_new_mac_filter */
1545 new_mac = kzalloc(sizeof(*new_mac), GFP_ATOMIC);
1548 new_mac->f = add_head;
1549 new_mac->state = add_head->state;
1551 /* Add the new filter to the tmp list */
1552 hlist_add_head(&new_mac->hlist, tmp_add_list);
1554 /* Put the original filter into the delete list */
1555 f->state = I40E_FILTER_REMOVE;
1556 hash_del(&f->hlist);
1557 hlist_add_head(&f->hlist, tmp_del_list);
1561 vsi->has_vlan_filter = !!vlan_filters;
1566 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1567 * @vsi: the PF Main VSI - inappropriate for any other VSI
1568 * @macaddr: the MAC address
1570 * Remove whatever filter the firmware set up so the driver can manage
1571 * its own filtering intelligently.
1573 static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1575 struct i40e_aqc_remove_macvlan_element_data element;
1576 struct i40e_pf *pf = vsi->back;
1578 /* Only appropriate for the PF main VSI */
1579 if (vsi->type != I40E_VSI_MAIN)
1582 memset(&element, 0, sizeof(element));
1583 ether_addr_copy(element.mac_addr, macaddr);
1584 element.vlan_tag = 0;
1585 /* Ignore error returns, some firmware does it this way... */
1586 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1587 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1589 memset(&element, 0, sizeof(element));
1590 ether_addr_copy(element.mac_addr, macaddr);
1591 element.vlan_tag = 0;
1592 /* ...and some firmware does it this way. */
1593 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1594 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1595 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1599 * i40e_add_filter - Add a mac/vlan filter to the VSI
1600 * @vsi: the VSI to be searched
1601 * @macaddr: the MAC address
1604 * Returns ptr to the filter object or NULL when no memory available.
1606 * NOTE: This function is expected to be called with mac_filter_hash_lock
1609 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1610 const u8 *macaddr, s16 vlan)
1612 struct i40e_mac_filter *f;
1615 if (!vsi || !macaddr)
1618 f = i40e_find_filter(vsi, macaddr, vlan);
1620 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1624 /* Update the boolean indicating if we need to function in VLAN mode */
1628 vsi->has_vlan_filter = true;
1630 ether_addr_copy(f->macaddr, macaddr);
1632 f->state = I40E_FILTER_NEW;
1633 INIT_HLIST_NODE(&f->hlist);
1635 key = i40e_addr_to_hkey(macaddr);
1636 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1638 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1639 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1642 /* If we're asked to add a filter that has been marked for removal, it
1643 * is safe to simply restore it to active state. __i40e_del_filter
1644 * will have simply deleted any filters which were previously marked
1645 * NEW or FAILED, so if it is currently marked REMOVE it must have
1646 * previously been ACTIVE. Since we haven't yet run the sync filters
1647 * task, just restore this filter to the ACTIVE state so that the
1648 * sync task leaves it in place
1650 if (f->state == I40E_FILTER_REMOVE)
1651 f->state = I40E_FILTER_ACTIVE;
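/* Rough sketch of the filter state flow implied above (not exhaustive):
 * a filter enters the hash as I40E_FILTER_NEW, becomes I40E_FILTER_ACTIVE
 * (or I40E_FILTER_FAILED) once the sync task has pushed it to firmware,
 * and is flagged I40E_FILTER_REMOVE when deletion must go through an admin
 * queue command. Re-adding a REMOVE filter before the sync task runs simply
 * restores it to ACTIVE, as done just above.
 */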
1657 * __i40e_del_filter - Remove a specific filter from the VSI
1658 * @vsi: VSI to remove from
1659 * @f: the filter to remove from the list
1661 * This function should be called instead of i40e_del_filter only if you know
1662 * the exact filter you will remove already, such as via i40e_find_filter or
1665 * NOTE: This function is expected to be called with mac_filter_hash_lock
1667 * ANOTHER NOTE: This function MUST be called from within the context of
1668 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1669 * instead of list_for_each_entry().
1671 void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1676 /* If the filter was never added to firmware then we can just delete it
1677 * directly; we don't want to set the state to remove, or else an
1678 * admin queue command will fire unnecessarily.
1680 if ((f->state == I40E_FILTER_FAILED) ||
1681 (f->state == I40E_FILTER_NEW)) {
1682 hash_del(&f->hlist);
1685 f->state = I40E_FILTER_REMOVE;
1688 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1689 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1693 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
1694 * @vsi: the VSI to be searched
1695 * @macaddr: the MAC address
1698 * NOTE: This function is expected to be called with mac_filter_hash_lock
1700 * ANOTHER NOTE: This function MUST be called from within the context of
1701 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1702 * instead of list_for_each_entry().
1704 void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1706 struct i40e_mac_filter *f;
1708 if (!vsi || !macaddr)
1711 f = i40e_find_filter(vsi, macaddr, vlan);
1712 __i40e_del_filter(vsi, f);
1716 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
1717 * @vsi: the VSI to be searched
1718 * @macaddr: the mac address to be filtered
1720 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
1721 * go through all the macvlan filters and add a macvlan filter for each
1722 * unique vlan that already exists. If a PVID has been assigned, instead only
1723 * add the macaddr to that VLAN.
1725 * Returns last filter added on success, else NULL
1727 struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1730 struct i40e_mac_filter *f, *add = NULL;
1731 struct hlist_node *h;
1735 return i40e_add_filter(vsi, macaddr,
1736 le16_to_cpu(vsi->info.pvid));
1738 if (!i40e_is_vsi_in_vlan(vsi))
1739 return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1741 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1742 if (f->state == I40E_FILTER_REMOVE)
1744 add = i40e_add_filter(vsi, macaddr, f->vlan);
1753 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
1754 * @vsi: the VSI to be searched
1755 * @macaddr: the mac address to be removed
1757 * Removes a given MAC address from a VSI regardless of what VLAN it has been associated with.
1760 * Returns 0 for success, or error
1762 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1764 struct i40e_mac_filter *f;
1765 struct hlist_node *h;
1769 lockdep_assert_held(&vsi->mac_filter_hash_lock);
1770 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1771 if (ether_addr_equal(macaddr, f->macaddr)) {
1772 __i40e_del_filter(vsi, f);
1784 * i40e_set_mac - NDO callback to set mac address
1785 * @netdev: network interface device structure
1786 * @p: pointer to an address structure
1788 * Returns 0 on success, negative on failure
1790 static int i40e_set_mac(struct net_device *netdev, void *p)
1792 struct i40e_netdev_priv *np = netdev_priv(netdev);
1793 struct i40e_vsi *vsi = np->vsi;
1794 struct i40e_pf *pf = vsi->back;
1795 struct i40e_hw *hw = &pf->hw;
1796 struct sockaddr *addr = p;
1798 if (!is_valid_ether_addr(addr->sa_data))
1799 return -EADDRNOTAVAIL;
1801 if (test_bit(__I40E_DOWN, pf->state) ||
1802 test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
1803 return -EADDRNOTAVAIL;
1805 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1806 netdev_info(netdev, "returning to hw mac address %pM\n",
1809 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1811 /* Copy the address first, so that we avoid a possible race with .set_rx_mode().
1813 * - Remove old address from MAC filter
1814 * - Copy new address
1815 * - Add new address to MAC filter
1817 spin_lock_bh(&vsi->mac_filter_hash_lock);
1818 i40e_del_mac_filter(vsi, netdev->dev_addr);
1819 eth_hw_addr_set(netdev, addr->sa_data);
1820 i40e_add_mac_filter(vsi, netdev->dev_addr);
1821 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1823 if (vsi->type == I40E_VSI_MAIN) {
1826 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
1827 addr->sa_data, NULL);
1829 netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n",
1831 i40e_aq_str(hw, hw->aq.asq_last_status));
1834 /* schedule our worker thread which will take care of
1835 * applying the new filter changes
1837 i40e_service_event_schedule(pf);
1842 * i40e_config_rss_aq - Prepare for RSS using AQ commands
1843 * @vsi: vsi structure
1844 * @seed: RSS hash seed
1845 * @lut: pointer to lookup table of lut_size
1846 * @lut_size: size of the lookup table
1848 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1849 u8 *lut, u16 lut_size)
1851 struct i40e_pf *pf = vsi->back;
1852 struct i40e_hw *hw = &pf->hw;
1856 struct i40e_aqc_get_set_rss_key_data *seed_dw =
1857 (struct i40e_aqc_get_set_rss_key_data *)seed;
1858 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
1860 dev_info(&pf->pdev->dev,
1861 "Cannot set RSS key, err %pe aq_err %s\n",
1863 i40e_aq_str(hw, hw->aq.asq_last_status));
1868 bool pf_lut = vsi->type == I40E_VSI_MAIN;
1870 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1872 dev_info(&pf->pdev->dev,
1873 "Cannot set RSS lut, err %pe aq_err %s\n",
1875 i40e_aq_str(hw, hw->aq.asq_last_status));
1883 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
1884 * @vsi: VSI structure
1886 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
1888 struct i40e_pf *pf = vsi->back;
1889 u8 seed[I40E_HKEY_ARRAY_SIZE];
1893 if (!test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
1896 vsi->rss_size = min_t(int, pf->alloc_rss_size,
1897 vsi->num_queue_pairs);
1900 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1904 /* Use the user-configured hash key and lookup table if there is one,
1905 * otherwise use the defaults
1907 if (vsi->rss_lut_user)
1908 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1910 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
1911 if (vsi->rss_hkey_user)
1912 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
1914 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
1915 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
1921 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
1922 * @vsi: the VSI being configured
1923 * @ctxt: VSI context structure
1924 * @enabled_tc: bitmap of enabled traffic classes
1926 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
1928 static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
1929 struct i40e_vsi_context *ctxt,
1932 u16 qcount = 0, max_qcount, qmap, sections = 0;
1933 int i, override_q, pow, num_qps, ret;
1934 u8 netdev_tc = 0, offset = 0;
1936 if (vsi->type != I40E_VSI_MAIN)
1938 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1939 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1940 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
1941 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1942 num_qps = vsi->mqprio_qopt.qopt.count[0];
1944 /* find the next higher power-of-2 of num queue pairs */
1945 pow = ilog2(num_qps);
1946 if (!is_power_of_2(num_qps))
1948 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1949 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
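/* Worked example of the rounding above (hypothetical count): with
 * num_qps == 6, ilog2(6) == 2 and 6 is not a power of two, so pow is
 * rounded up to 3 and the qmap advertises 2^3 == 8 queue slots starting
 * at the given offset.
 */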
1951 /* Setup queue offset/count for all TCs for given VSI */
1952 max_qcount = vsi->mqprio_qopt.qopt.count[0];
1953 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1954 /* See if the given TC is enabled for the given VSI */
1955 if (vsi->tc_config.enabled_tc & BIT(i)) {
1956 offset = vsi->mqprio_qopt.qopt.offset[i];
1957 qcount = vsi->mqprio_qopt.qopt.count[i];
1958 if (qcount > max_qcount)
1959 max_qcount = qcount;
1960 vsi->tc_config.tc_info[i].qoffset = offset;
1961 vsi->tc_config.tc_info[i].qcount = qcount;
1962 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1964 /* TC is not enabled so set the offset to
1965 * default queue and allocate one queue
1968 vsi->tc_config.tc_info[i].qoffset = 0;
1969 vsi->tc_config.tc_info[i].qcount = 1;
1970 vsi->tc_config.tc_info[i].netdev_tc = 0;
1974 /* Set actual Tx/Rx queue pairs */
1975 vsi->num_queue_pairs = offset + qcount;
1977 /* Setup queue TC[0].qmap for given VSI context */
1978 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1979 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1980 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1981 ctxt->info.valid_sections |= cpu_to_le16(sections);
1983 /* Reconfigure RSS for main VSI with max queue count */
1984 vsi->rss_size = max_qcount;
1985 ret = i40e_vsi_config_rss(vsi);
1987 dev_info(&vsi->back->pdev->dev,
1988 "Failed to reconfig rss for num_queues (%u)\n",
1992 vsi->reconfig_rss = true;
1993 dev_dbg(&vsi->back->pdev->dev,
1994 "Reconfigured rss with num_queues (%u)\n", max_qcount);
1996 /* Find queue count available for channel VSIs and starting offset
1999 override_q = vsi->mqprio_qopt.qopt.count[0];
2000 if (override_q && override_q < vsi->num_queue_pairs) {
2001 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
2002 vsi->next_base_queue = override_q;
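/* Illustrative sketch, not part of the driver: the TC0 qmap word built above
 * packs the queue offset together with the exponent of the next-higher power
 * of two of the queue count. A minimal stand-alone version of that encoding
 * (hypothetical helper, using the same shift macros as above):
 */
static u16 example_build_tc_qmap(u16 offset, u16 num_qps)
{
	u16 pow = 0;

	/* round the queue count up to a power of two, as done above with
	 * ilog2()/is_power_of_2()
	 */
	while ((1U << pow) < num_qps)
		pow++;

	return (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	       (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
}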
2008 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
2009 * @vsi: the VSI being setup
2010 * @ctxt: VSI context structure
2011 * @enabled_tc: Enabled TCs bitmap
2012 * @is_add: True if called before Add VSI
2014 * Setup VSI queue mapping for enabled traffic classes.
2016 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
2017 struct i40e_vsi_context *ctxt,
2021 struct i40e_pf *pf = vsi->back;
2031 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
2033 /* zero out queue mapping, it will get updated at the end of the function */
2034 memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));
2036 if (vsi->type == I40E_VSI_MAIN) {
2037 /* This code helps add more queues to the VSI if we have
2038 * more cores than RSS can support; the extra cores will
2039 * be served by ATR or other filters. Furthermore, a
2040 * non-zero req_queue_pairs means the user requested a new
2041 * queue count via ethtool's set_channels, so use that
2042 * value for queue distribution across traffic classes.
2043 * We need at least one queue pair for the interface
2044 * to be usable, as handled in the else branch below.
2046 if (vsi->req_queue_pairs > 0)
2047 vsi->num_queue_pairs = vsi->req_queue_pairs;
2048 else if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
2049 vsi->num_queue_pairs = pf->num_lan_msix;
2051 vsi->num_queue_pairs = 1;
2054 /* Number of queues per enabled TC */
2055 if (vsi->type == I40E_VSI_MAIN ||
2056 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
2057 num_tc_qps = vsi->num_queue_pairs;
2059 num_tc_qps = vsi->alloc_queue_pairs;
2061 if (enabled_tc && test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) {
2062 /* Find numtc from enabled TC bitmap */
2063 for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2064 if (enabled_tc & BIT(i)) /* TC is enabled */
2068 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
2071 num_tc_qps = num_tc_qps / numtc;
2072 num_tc_qps = min_t(int, num_tc_qps,
2073 i40e_pf_get_max_q_per_tc(pf));
2076 vsi->tc_config.numtc = numtc;
2077 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
2079 /* Do not use more TC queue pairs than there are MSI-X vectors */
2080 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
2081 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
2083 /* Setup queue offset/count for all TCs for given VSI */
2084 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2085 /* See if the given TC is enabled for the given VSI */
2086 if (vsi->tc_config.enabled_tc & BIT(i)) {
2090 switch (vsi->type) {
2092 if ((!test_bit(I40E_FLAG_FD_SB_ENA,
2094 !test_bit(I40E_FLAG_FD_ATR_ENA,
2096 vsi->tc_config.enabled_tc != 1) {
2097 qcount = min_t(int, pf->alloc_rss_size,
2103 case I40E_VSI_SRIOV:
2104 case I40E_VSI_VMDQ2:
2106 qcount = num_tc_qps;
2110 vsi->tc_config.tc_info[i].qoffset = offset;
2111 vsi->tc_config.tc_info[i].qcount = qcount;
2113 /* find the next higher power-of-2 of num queue pairs */
2116 while (num_qps && (BIT_ULL(pow) < qcount)) {
2121 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
2123 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2124 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
2128 /* TC is not enabled so set the offset to
2129 * default queue and allocate one queue
2132 vsi->tc_config.tc_info[i].qoffset = 0;
2133 vsi->tc_config.tc_info[i].qcount = 1;
2134 vsi->tc_config.tc_info[i].netdev_tc = 0;
2138 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
2140 /* Do not change previously set num_queue_pairs for PFs and VFs */
2141 if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
2142 (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
2143 (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
2144 vsi->num_queue_pairs = offset;
2146 /* Scheduler section valid can only be set for ADD VSI */
2148 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
2150 ctxt->info.up_enable_bits = enabled_tc;
2152 if (vsi->type == I40E_VSI_SRIOV) {
2153 ctxt->info.mapping_flags |=
2154 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2155 for (i = 0; i < vsi->num_queue_pairs; i++)
2156 ctxt->info.queue_mapping[i] =
2157 cpu_to_le16(vsi->base_queue + i);
2159 ctxt->info.mapping_flags |=
2160 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2161 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
2163 ctxt->info.valid_sections |= cpu_to_le16(sections);
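/* Illustrative sketch, not part of the driver: the two queue-mapping layouts
 * programmed above. SR-IOV VSIs get a non-contiguous map with one absolute
 * queue id per entry, while all other VSI types use a contiguous map that
 * only carries the base queue (hypothetical helper for clarity).
 */
static void example_fill_queue_mapping(__le16 *queue_mapping, bool noncontig,
				       u16 base_queue, u16 num_queue_pairs)
{
	if (noncontig) {
		u16 i;

		for (i = 0; i < num_queue_pairs; i++)
			queue_mapping[i] = cpu_to_le16(base_queue + i);
	} else {
		queue_mapping[0] = cpu_to_le16(base_queue);
	}
}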
2167 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
2168 * @netdev: the netdevice
2169 * @addr: address to add
2171 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
2172 * __dev_(uc|mc)_sync from .set_rx_mode and are guaranteed to hold the hash lock.
2174 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
2176 struct i40e_netdev_priv *np = netdev_priv(netdev);
2177 struct i40e_vsi *vsi = np->vsi;
2179 if (i40e_add_mac_filter(vsi, addr))
2186 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
2187 * @netdev: the netdevice
2188 * @addr: address to remove
2190 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
2191 * __dev_(uc|mc)_sync from .set_rx_mode and are guaranteed to hold the hash lock.
2193 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
2195 struct i40e_netdev_priv *np = netdev_priv(netdev);
2196 struct i40e_vsi *vsi = np->vsi;
2198 /* Under some circumstances, we might receive a request to delete
2199 * our own device address from our uc list. Because we store the
2200 * device address in the VSI's MAC/VLAN filter list, we need to ignore
2201 * such requests and not delete our device address from this list.
2203 if (ether_addr_equal(addr, netdev->dev_addr))
2206 i40e_del_mac_filter(vsi, addr);
2212 * i40e_set_rx_mode - NDO callback to set the netdev filters
2213 * @netdev: network interface device structure
2215 static void i40e_set_rx_mode(struct net_device *netdev)
2217 struct i40e_netdev_priv *np = netdev_priv(netdev);
2218 struct i40e_vsi *vsi = np->vsi;
2220 spin_lock_bh(&vsi->mac_filter_hash_lock);
2222 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2223 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
2225 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2227 /* check for other flag changes */
2228 if (vsi->current_netdev_flags != vsi->netdev->flags) {
2229 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2230 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
2235 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
2236 * @vsi: Pointer to VSI struct
2237 * @from: Pointer to list which contains MAC filter entries - changes to
2238 * those entries need to be undone.
2240 * MAC filter entries from this list were slated for deletion.
2242 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
2243 struct hlist_head *from)
2245 struct i40e_mac_filter *f;
2246 struct hlist_node *h;
2248 hlist_for_each_entry_safe(f, h, from, hlist) {
2249 u64 key = i40e_addr_to_hkey(f->macaddr);
2251 /* Move the element back into the MAC filter list */
2252 hlist_del(&f->hlist);
2253 hash_add(vsi->mac_filter_hash, &f->hlist, key);
2258 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
2259 * @vsi: Pointer to vsi struct
2260 * @from: Pointer to list which contains MAC filter entries - changes to
2261 * those entries need to be undone.
2263 * MAC filter entries from this list were slated for addition.
2265 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
2266 struct hlist_head *from)
2268 struct i40e_new_mac_filter *new;
2269 struct hlist_node *h;
2271 hlist_for_each_entry_safe(new, h, from, hlist) {
2272 /* We can simply free the wrapper structure */
2273 hlist_del(&new->hlist);
2274 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2280 * i40e_next_filter - Get the next non-broadcast filter from a list
2281 * @next: pointer to filter in list
2283 * Returns the next non-broadcast filter in the list. Required so that we
2284 * ignore broadcast filters within the list, since these are not handled via
2285 * the normal firmware update path.
2288 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2290 hlist_for_each_entry_continue(next, hlist) {
2291 if (!is_broadcast_ether_addr(next->f->macaddr))
2299 * i40e_update_filter_state - Update filter state based on return data
2301 * @count: Number of filters added
2302 * @add_list: return data from fw
2303 * @add_head: pointer to first filter in current batch
2305 * MAC filter entries from list were slated to be added to device. Returns
2306 * number of successful filters. Note that 0 does NOT mean success!
2309 i40e_update_filter_state(int count,
2310 struct i40e_aqc_add_macvlan_element_data *add_list,
2311 struct i40e_new_mac_filter *add_head)
2316 for (i = 0; i < count; i++) {
2317 /* Always check status of each filter. We don't need to check
2318 * the firmware return status because we pre-set the filter
2319 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2320 * request to the adminq. Thus, if it no longer matches then
2321 * we know the filter is active.
2323 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2324 add_head->state = I40E_FILTER_FAILED;
2326 add_head->state = I40E_FILTER_ACTIVE;
2330 add_head = i40e_next_filter(add_head);
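/* Illustrative sketch, not part of the driver: the success-detection trick
 * described in the loop above. Every element is sent to firmware with
 * match_method pre-set to I40E_AQC_MM_ERR_NO_RES; firmware overwrites the
 * field for filters it accepted, so any element still carrying the sentinel
 * is known to have failed (hypothetical helper).
 */
static bool
example_filter_was_added(struct i40e_aqc_add_macvlan_element_data *elem)
{
	return elem->match_method != I40E_AQC_MM_ERR_NO_RES;
}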
2339 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2340 * @vsi: ptr to the VSI
2341 * @vsi_name: name to display in messages
2342 * @list: the list of filters to send to firmware
2343 * @num_del: the number of filters to delete
2344 * @retval: Set to -EIO on failure to delete
2346 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2347 * *retval instead of a return value so that success does not force *retval to
2348 * be set to 0. This ensures that a sequence of calls to this function
2349 * preserves the previous value of *retval on successful delete.
2352 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2353 struct i40e_aqc_remove_macvlan_element_data *list,
2354 int num_del, int *retval)
2356 struct i40e_hw *hw = &vsi->back->hw;
2357 enum i40e_admin_queue_err aq_status;
2360 aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
2363 /* Explicitly ignore and do not report when firmware returns ENOENT */
2364 if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
2366 dev_info(&vsi->back->pdev->dev,
2367 "ignoring delete macvlan error on %s, err %pe, aq_err %s\n",
2368 vsi_name, ERR_PTR(aq_ret),
2369 i40e_aq_str(hw, aq_status));
2374 * i40e_aqc_add_filters - Request firmware to add a set of filters
2375 * @vsi: ptr to the VSI
2376 * @vsi_name: name to display in messages
2377 * @list: the list of filters to send to firmware
2378 * @add_head: Position in the add hlist
2379 * @num_add: the number of filters to add
2381 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2382 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2383 * space for more filters.
2386 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2387 struct i40e_aqc_add_macvlan_element_data *list,
2388 struct i40e_new_mac_filter *add_head,
2391 struct i40e_hw *hw = &vsi->back->hw;
2392 enum i40e_admin_queue_err aq_status;
2395 i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
2396 fcnt = i40e_update_filter_state(num_add, list, add_head);
2398 if (fcnt != num_add) {
2399 if (vsi->type == I40E_VSI_MAIN) {
2400 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2401 dev_warn(&vsi->back->pdev->dev,
2402 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2403 i40e_aq_str(hw, aq_status), vsi_name);
2404 } else if (vsi->type == I40E_VSI_SRIOV ||
2405 vsi->type == I40E_VSI_VMDQ1 ||
2406 vsi->type == I40E_VSI_VMDQ2) {
2407 dev_warn(&vsi->back->pdev->dev,
2408 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2409 i40e_aq_str(hw, aq_status), vsi_name,
2412 dev_warn(&vsi->back->pdev->dev,
2413 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2414 i40e_aq_str(hw, aq_status), vsi_name,
2421 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2422 * @vsi: pointer to the VSI
2423 * @vsi_name: the VSI name
2426 * This function sets or clears the promiscuous broadcast flags for VLAN
2427 * filters in order to properly receive broadcast frames. Assumes that only
2428 * broadcast filters are passed.
2430 * Returns status indicating success or failure.
2433 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2434 struct i40e_mac_filter *f)
2436 bool enable = f->state == I40E_FILTER_NEW;
2437 struct i40e_hw *hw = &vsi->back->hw;
2440 if (f->vlan == I40E_VLAN_ANY) {
2441 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2446 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2454 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2455 dev_warn(&vsi->back->pdev->dev,
2456 "Error %s, forcing overflow promiscuous on %s\n",
2457 i40e_aq_str(hw, hw->aq.asq_last_status),
2465 * i40e_set_promiscuous - set promiscuous mode
2466 * @pf: board private structure
2467 * @promisc: promisc on or off
2469 * There are different ways of setting promiscuous mode on a PF depending on
2470 * what state/environment we're in. This identifies and sets it appropriately.
2471 * Returns 0 on success.
2473 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2475 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2476 struct i40e_hw *hw = &pf->hw;
2479 if (vsi->type == I40E_VSI_MAIN &&
2480 pf->lan_veb != I40E_NO_VEB &&
2481 !test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
2482 /* set defport ON for Main VSI instead of true promisc;
2483 * this way we will get all unicast/multicast and VLAN
2484 * promisc behavior but will not get VF or VMDq traffic
2485 * replicated on the Main VSI.
2488 aq_ret = i40e_aq_set_default_vsi(hw,
2492 aq_ret = i40e_aq_clear_default_vsi(hw,
2496 dev_info(&pf->pdev->dev,
2497 "Set default VSI failed, err %pe, aq_err %s\n",
2499 i40e_aq_str(hw, hw->aq.asq_last_status));
2502 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2508 dev_info(&pf->pdev->dev,
2509 "set unicast promisc failed, err %pe, aq_err %s\n",
2511 i40e_aq_str(hw, hw->aq.asq_last_status));
2513 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2518 dev_info(&pf->pdev->dev,
2519 "set multicast promisc failed, err %pe, aq_err %s\n",
2521 i40e_aq_str(hw, hw->aq.asq_last_status));
2526 pf->cur_promisc = promisc;
2532 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2533 * @vsi: ptr to the VSI
2535 * Push any outstanding VSI filter changes through the AdminQ.
2537 * Returns 0 or error value
2539 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2541 struct hlist_head tmp_add_list, tmp_del_list;
2542 struct i40e_mac_filter *f;
2543 struct i40e_new_mac_filter *new, *add_head = NULL;
2544 struct i40e_hw *hw = &vsi->back->hw;
2545 bool old_overflow, new_overflow;
2546 unsigned int failed_filters = 0;
2547 unsigned int vlan_filters = 0;
2548 char vsi_name[16] = "PF";
2549 int filter_list_len = 0;
2550 u32 changed_flags = 0;
2551 struct hlist_node *h;
2561 /* filter element array pointers, allocated later with kzalloc */
2562 struct i40e_aqc_add_macvlan_element_data *add_list;
2563 struct i40e_aqc_remove_macvlan_element_data *del_list;
2565 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2566 usleep_range(1000, 2000);
2569 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2572 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2573 vsi->current_netdev_flags = vsi->netdev->flags;
2576 INIT_HLIST_HEAD(&tmp_add_list);
2577 INIT_HLIST_HEAD(&tmp_del_list);
2579 if (vsi->type == I40E_VSI_SRIOV)
2580 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2581 else if (vsi->type != I40E_VSI_MAIN)
2582 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2584 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2585 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2587 spin_lock_bh(&vsi->mac_filter_hash_lock);
2588 /* Create a list of filters to delete. */
2589 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2590 if (f->state == I40E_FILTER_REMOVE) {
2591 /* Move the element into temporary del_list */
2592 hash_del(&f->hlist);
2593 hlist_add_head(&f->hlist, &tmp_del_list);
2595 /* Avoid counting removed filters */
2598 if (f->state == I40E_FILTER_NEW) {
2599 /* Create a temporary i40e_new_mac_filter */
2600 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2602 goto err_no_memory_locked;
2604 /* Store pointer to the real filter */
2606 new->state = f->state;
2608 /* Add it to the hash list */
2609 hlist_add_head(&new->hlist, &tmp_add_list);
2612 /* Count the number of active (current and new) VLAN
2613 * filters we have now. Does not count filters which
2614 * are marked for deletion.
2620 if (vsi->type != I40E_VSI_SRIOV)
2621 retval = i40e_correct_mac_vlan_filters
2622 (vsi, &tmp_add_list, &tmp_del_list,
2625 retval = i40e_correct_vf_mac_vlan_filters
2626 (vsi, &tmp_add_list, &tmp_del_list,
2627 vlan_filters, pf->vf[vsi->vf_id].trusted);
2629 hlist_for_each_entry(new, &tmp_add_list, hlist)
2630 netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
2633 goto err_no_memory_locked;
2635 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2638 /* Now process 'del_list' outside the lock */
2639 if (!hlist_empty(&tmp_del_list)) {
2640 filter_list_len = hw->aq.asq_buf_size /
2641 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2642 list_size = filter_list_len *
2643 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2644 del_list = kzalloc(list_size, GFP_ATOMIC);
2648 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2651 /* handle broadcast filters by updating the broadcast
2652 * promiscuous flag and releasing the filter from the list.
2654 if (is_broadcast_ether_addr(f->macaddr)) {
2655 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2657 hlist_del(&f->hlist);
2662 /* add to delete list */
2663 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2664 if (f->vlan == I40E_VLAN_ANY) {
2665 del_list[num_del].vlan_tag = 0;
2666 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2668 del_list[num_del].vlan_tag =
2669 cpu_to_le16((u16)(f->vlan));
2672 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2673 del_list[num_del].flags = cmd_flags;
2676 /* flush a full buffer */
2677 if (num_del == filter_list_len) {
2678 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2680 memset(del_list, 0, list_size);
2683 /* Release memory for MAC filter entries which were
2684 * synced up with HW.
2686 hlist_del(&f->hlist);
2691 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2699 if (!hlist_empty(&tmp_add_list)) {
2700 /* Do all the adds now. */
2701 filter_list_len = hw->aq.asq_buf_size /
2702 sizeof(struct i40e_aqc_add_macvlan_element_data);
2703 list_size = filter_list_len *
2704 sizeof(struct i40e_aqc_add_macvlan_element_data);
2705 add_list = kzalloc(list_size, GFP_ATOMIC);
2710 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2711 /* handle broadcast filters by updating the broadcast
2712 * promiscuous flag instead of adding a MAC filter.
2714 if (is_broadcast_ether_addr(new->f->macaddr)) {
2715 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2717 new->state = I40E_FILTER_FAILED;
2719 new->state = I40E_FILTER_ACTIVE;
2723 /* add to add array */
2727 ether_addr_copy(add_list[num_add].mac_addr,
2729 if (new->f->vlan == I40E_VLAN_ANY) {
2730 add_list[num_add].vlan_tag = 0;
2731 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2733 add_list[num_add].vlan_tag =
2734 cpu_to_le16((u16)(new->f->vlan));
2736 add_list[num_add].queue_number = 0;
2737 /* set invalid match method for later detection */
2738 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2739 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2740 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2743 /* flush a full buffer */
2744 if (num_add == filter_list_len) {
2745 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2747 memset(add_list, 0, list_size);
2752 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2755 /* Now move all of the filters from the temp add list back to
2758 spin_lock_bh(&vsi->mac_filter_hash_lock);
2759 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2760 /* Only update the state if we're still NEW */
2761 if (new->f->state == I40E_FILTER_NEW)
2762 new->f->state = new->state;
2763 hlist_del(&new->hlist);
2764 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2767 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2772 /* Determine the number of active and failed filters. */
2773 spin_lock_bh(&vsi->mac_filter_hash_lock);
2774 vsi->active_filters = 0;
2775 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2776 if (f->state == I40E_FILTER_ACTIVE)
2777 vsi->active_filters++;
2778 else if (f->state == I40E_FILTER_FAILED)
2781 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2783 /* Check if we are able to exit overflow promiscuous mode. We can
2784 * safely exit if we didn't just enter, we no longer have any failed
2785 * filters, and we have reduced filters below the threshold value.
2787 if (old_overflow && !failed_filters &&
2788 vsi->active_filters < vsi->promisc_threshold) {
2789 dev_info(&pf->pdev->dev,
2790 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2792 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2793 vsi->promisc_threshold = 0;
2796 /* if the VF is not trusted, do not enable promiscuous mode */
2797 if (vsi->type == I40E_VSI_SRIOV && pf->vf &&
2798 !pf->vf[vsi->vf_id].trusted) {
2799 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2803 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2805 /* If we are entering overflow promiscuous, we need to calculate a new
2806 * threshold for when we are safe to exit
2808 if (!old_overflow && new_overflow)
2809 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
2811 /* check for changes in promiscuous modes */
2812 if (changed_flags & IFF_ALLMULTI) {
2813 bool cur_multipromisc;
2815 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2816 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2821 retval = i40e_aq_rc_to_posix(aq_ret,
2822 hw->aq.asq_last_status);
2823 dev_info(&pf->pdev->dev,
2824 "set multi promisc failed on %s, err %pe aq_err %s\n",
2827 i40e_aq_str(hw, hw->aq.asq_last_status));
2829 dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
2830 cur_multipromisc ? "entering" : "leaving");
2834 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2837 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2839 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2841 retval = i40e_aq_rc_to_posix(aq_ret,
2842 hw->aq.asq_last_status);
2843 dev_info(&pf->pdev->dev,
2844 "Setting promiscuous %s failed on %s, err %pe aq_err %s\n",
2845 cur_promisc ? "on" : "off",
2848 i40e_aq_str(hw, hw->aq.asq_last_status));
2852 /* if something went wrong then set the changed flag so we try again */
2854 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2856 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2860 /* Restore elements on the temporary add and delete lists */
2861 spin_lock_bh(&vsi->mac_filter_hash_lock);
2862 err_no_memory_locked:
2863 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2864 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2865 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2867 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2868 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
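/* Illustrative sketch, not part of the driver: the overflow-promiscuous
 * hysteresis used above. On entry the threshold is set to 3/4 of the filters
 * active at that moment; the VSI leaves overflow promiscuous only once no
 * filters have failed and the active count drops back below that threshold
 * (hypothetical helpers).
 */
static unsigned int example_promisc_threshold(unsigned int active_filters)
{
	return (active_filters * 3) / 4;
}

static bool example_can_exit_overflow_promisc(unsigned int active_filters,
					      unsigned int failed_filters,
					      unsigned int threshold)
{
	return !failed_filters && active_filters < threshold;
}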
2873 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2874 * @pf: board private structure
2876 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2878 struct i40e_vsi *vsi;
2883 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2885 if (test_bit(__I40E_VF_DISABLE, pf->state)) {
2886 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2890 i40e_pf_for_each_vsi(pf, v, vsi) {
2891 if ((vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
2892 !test_bit(__I40E_VSI_RELEASING, vsi->state)) {
2893 int ret = i40e_sync_vsi_filters(vsi);
2896 /* come back and try again later */
2897 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2906 * i40e_calculate_vsi_rx_buf_len - Calculates buffer length
2908 * @vsi: VSI to calculate rx_buf_len from
2910 static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
2912 if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags))
2913 return SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048);
2915 return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
2919 * i40e_max_vsi_frame_size - returns the maximum allowed frame size for VSI
2921 * @xdp_prog: XDP program
2923 static int i40e_max_vsi_frame_size(struct i40e_vsi *vsi,
2924 struct bpf_prog *xdp_prog)
2926 u16 rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
2929 if (xdp_prog && !xdp_prog->aux->xdp_has_frags)
2932 chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
2934 return min_t(u16, rx_buf_len * chain_len, I40E_MAX_RXBUFFER);
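/* Illustrative sketch, not part of the driver: the frame-size cap computed
 * above. A legacy single-buffer XDP program limits a frame to one Rx buffer,
 * otherwise it may span up to I40E_MAX_CHAINED_RX_BUFFERS buffers, and the
 * result is clamped to I40E_MAX_RXBUFFER (hypothetical helper).
 */
static int example_max_frame_size(u16 rx_buf_len, bool xdp_single_buffer)
{
	u16 chain_len = xdp_single_buffer ? 1 : I40E_MAX_CHAINED_RX_BUFFERS;

	return min_t(u16, rx_buf_len * chain_len, I40E_MAX_RXBUFFER);
}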
2938 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2939 * @netdev: network interface device structure
2940 * @new_mtu: new value for maximum frame size
2942 * Returns 0 on success, negative on failure
2944 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2946 struct i40e_netdev_priv *np = netdev_priv(netdev);
2947 struct i40e_vsi *vsi = np->vsi;
2948 struct i40e_pf *pf = vsi->back;
2951 frame_size = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog);
2952 if (new_mtu > frame_size - I40E_PACKET_HDR_PAD) {
2953 netdev_err(netdev, "Error changing mtu to %d, Max is %d\n",
2954 new_mtu, frame_size - I40E_PACKET_HDR_PAD);
2958 netdev_dbg(netdev, "changing MTU from %d to %d\n",
2959 netdev->mtu, new_mtu);
2960 netdev->mtu = new_mtu;
2961 if (netif_running(netdev))
2962 i40e_vsi_reinit_locked(vsi);
2963 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2964 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2969 * i40e_ioctl - Access the hwtstamp interface
2970 * @netdev: network interface device structure
2971 * @ifr: interface request data
2972 * @cmd: ioctl command
2974 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2976 struct i40e_netdev_priv *np = netdev_priv(netdev);
2977 struct i40e_pf *pf = np->vsi->back;
2981 return i40e_ptp_get_ts_config(pf, ifr);
2983 return i40e_ptp_set_ts_config(pf, ifr);
2990 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2991 * @vsi: the vsi being adjusted
2993 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2995 struct i40e_vsi_context ctxt;
2998 /* Don't modify stripping options if a port VLAN is active */
3002 if ((vsi->info.valid_sections &
3003 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
3004 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
3005 return; /* already enabled */
3007 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3008 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
3009 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
3011 ctxt.seid = vsi->seid;
3012 ctxt.info = vsi->info;
3013 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3015 dev_info(&vsi->back->pdev->dev,
3016 "update vlan stripping failed, err %pe aq_err %s\n",
3018 i40e_aq_str(&vsi->back->hw,
3019 vsi->back->hw.aq.asq_last_status));
3024 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
3025 * @vsi: the vsi being adjusted
3027 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
3029 struct i40e_vsi_context ctxt;
3032 /* Don't modify stripping options if a port VLAN is active */
3036 if ((vsi->info.valid_sections &
3037 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
3038 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
3039 I40E_AQ_VSI_PVLAN_EMOD_MASK))
3040 return; /* already disabled */
3042 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3043 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
3044 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
3046 ctxt.seid = vsi->seid;
3047 ctxt.info = vsi->info;
3048 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3050 dev_info(&vsi->back->pdev->dev,
3051 "update vlan stripping failed, err %pe aq_err %s\n",
3053 i40e_aq_str(&vsi->back->hw,
3054 vsi->back->hw.aq.asq_last_status));
3059 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
3060 * @vsi: the vsi being configured
3061 * @vid: vlan id to be added (0 = untagged only, -1 = any)
3063 * This is a helper function for adding a new MAC/VLAN filter with the
3064 * specified VLAN for each existing MAC address already in the hash table.
3065 * This function does *not* perform any accounting to update filters based on
3068 * NOTE: this function expects to be called while under the
3069 * mac_filter_hash_lock
3071 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
3073 struct i40e_mac_filter *f, *add_f;
3074 struct hlist_node *h;
3077 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3078 /* If we're asked to add a filter that has been marked for
3079 * removal, it is safe to simply restore it to active state.
3080 * __i40e_del_filter will have simply deleted any filters which
3081 * were previously marked NEW or FAILED, so if it is currently
3082 * marked REMOVE it must have previously been ACTIVE. Since we
3083 * haven't yet run the sync filters task, just restore this
3084 * filter to the ACTIVE state so that the sync task leaves it
3087 if (f->state == I40E_FILTER_REMOVE && f->vlan == vid) {
3088 f->state = I40E_FILTER_ACTIVE;
3090 } else if (f->state == I40E_FILTER_REMOVE) {
3093 add_f = i40e_add_filter(vsi, f->macaddr, vid);
3095 dev_info(&vsi->back->pdev->dev,
3096 "Could not add vlan filter %d for %pM\n",
3106 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
3107 * @vsi: the VSI being configured
3108 * @vid: VLAN id to be added
3110 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
3117 /* The network stack will attempt to add VID=0, with the intention to
3118 * receive priority tagged packets with a VLAN of 0. Our HW receives
3119 * these packets by default when configured to receive untagged
3120 * packets, so we don't need to add a filter for this case.
3121 * Additionally, HW interprets adding a VID=0 filter as meaning to
3122 * receive *only* tagged traffic and stops receiving untagged traffic.
3123 * Thus, we do not want to actually add a filter for VID=0
3128 /* Locked once because all functions invoked below iterate the list */
3129 spin_lock_bh(&vsi->mac_filter_hash_lock);
3130 err = i40e_add_vlan_all_mac(vsi, vid);
3131 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3135 /* schedule our worker thread which will take care of
3136 * applying the new filter changes
3138 i40e_service_event_schedule(vsi->back);
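/* Illustrative sketch, not part of the driver: the VID 0 special case
 * described above. Untagged and priority-tagged frames are received by
 * default, and programming an explicit VID 0 filter would instead make the
 * hardware drop untagged traffic, so VID 0 never gets a filter
 * (hypothetical helper).
 */
static bool example_vid_needs_filter(u16 vid)
{
	return vid != 0;
}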
3143 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
3144 * @vsi: the vsi being configured
3145 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
3147 * This function should be used to remove all VLAN filters which match the
3148 * given VID. It does not schedule the service event and does not take the
3149 * mac_filter_hash_lock so it may be combined with other operations under
3150 * a single invocation of the mac_filter_hash_lock.
3152 * NOTE: this function expects to be called while under the
3153 * mac_filter_hash_lock
3155 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
3157 struct i40e_mac_filter *f;
3158 struct hlist_node *h;
3161 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3163 __i40e_del_filter(vsi, f);
3168 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
3169 * @vsi: the VSI being configured
3170 * @vid: VLAN id to be removed
3172 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
3174 if (!vid || vsi->info.pvid)
3177 spin_lock_bh(&vsi->mac_filter_hash_lock);
3178 i40e_rm_vlan_all_mac(vsi, vid);
3179 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3181 /* schedule our worker thread which will take care of
3182 * applying the new filter changes
3184 i40e_service_event_schedule(vsi->back);
3188 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
3189 * @netdev: network interface to be adjusted
3190 * @proto: unused protocol value
3191 * @vid: vlan id to be added
3193 * net_device_ops implementation for adding vlan ids
3195 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
3196 __always_unused __be16 proto, u16 vid)
3198 struct i40e_netdev_priv *np = netdev_priv(netdev);
3199 struct i40e_vsi *vsi = np->vsi;
3202 if (vid >= VLAN_N_VID)
3205 ret = i40e_vsi_add_vlan(vsi, vid);
3207 set_bit(vid, vsi->active_vlans);
3213 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
3214 * @netdev: network interface to be adjusted
3215 * @proto: unused protocol value
3216 * @vid: vlan id to be added
3218 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
3219 __always_unused __be16 proto, u16 vid)
3221 struct i40e_netdev_priv *np = netdev_priv(netdev);
3222 struct i40e_vsi *vsi = np->vsi;
3224 if (vid >= VLAN_N_VID)
3226 set_bit(vid, vsi->active_vlans);
3230 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
3231 * @netdev: network interface to be adjusted
3232 * @proto: unused protocol value
3233 * @vid: vlan id to be removed
3235 * net_device_ops implementation for removing vlan ids
3237 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
3238 __always_unused __be16 proto, u16 vid)
3240 struct i40e_netdev_priv *np = netdev_priv(netdev);
3241 struct i40e_vsi *vsi = np->vsi;
3243 /* return code is ignored as there is nothing a user
3244 * can do about a failure to remove, and a log message was
3245 * already printed from the other function
3247 i40e_vsi_kill_vlan(vsi, vid);
3249 clear_bit(vid, vsi->active_vlans);
3255 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
3256 * @vsi: the vsi being brought back up
3258 static void i40e_restore_vlan(struct i40e_vsi *vsi)
3265 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3266 i40e_vlan_stripping_enable(vsi);
3268 i40e_vlan_stripping_disable(vsi);
3270 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
3271 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
3276 * i40e_vsi_add_pvid - Add pvid for the VSI
3277 * @vsi: the vsi being adjusted
3278 * @vid: the vlan id to set as a PVID
3280 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
3282 struct i40e_vsi_context ctxt;
3285 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3286 vsi->info.pvid = cpu_to_le16(vid);
3287 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
3288 I40E_AQ_VSI_PVLAN_INSERT_PVID |
3289 I40E_AQ_VSI_PVLAN_EMOD_STR;
3291 ctxt.seid = vsi->seid;
3292 ctxt.info = vsi->info;
3293 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3295 dev_info(&vsi->back->pdev->dev,
3296 "add pvid failed, err %pe aq_err %s\n",
3298 i40e_aq_str(&vsi->back->hw,
3299 vsi->back->hw.aq.asq_last_status));
3307 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
3308 * @vsi: the vsi being adjusted
3310 * Just use the vlan_rx_register() service to put it back to normal
3312 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
3316 i40e_vlan_stripping_disable(vsi);
3320 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
3321 * @vsi: ptr to the VSI
3323 * If this function returns with an error, then it's possible one or
3324 * more of the rings is populated (while the rest are not). It is the
3325 * caller's duty to clean those orphaned rings.
3327 * Return 0 on success, negative on failure
3329 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3333 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3334 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3336 if (!i40e_enabled_xdp_vsi(vsi))
3339 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3340 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3346 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3347 * @vsi: ptr to the VSI
3349 * Free VSI's transmit software resources
3351 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3355 if (vsi->tx_rings) {
3356 for (i = 0; i < vsi->num_queue_pairs; i++)
3357 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3358 i40e_free_tx_resources(vsi->tx_rings[i]);
3361 if (vsi->xdp_rings) {
3362 for (i = 0; i < vsi->num_queue_pairs; i++)
3363 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3364 i40e_free_tx_resources(vsi->xdp_rings[i]);
3369 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3370 * @vsi: ptr to the VSI
3372 * If this function returns with an error, then it's possible one or
3373 * more of the rings is populated (while the rest are not). It is the
3374 * caller's duty to clean those orphaned rings.
3376 * Return 0 on success, negative on failure
3378 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3382 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3383 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3388 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3389 * @vsi: ptr to the VSI
3391 * Free all receive software resources
3393 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3400 for (i = 0; i < vsi->num_queue_pairs; i++)
3401 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3402 i40e_free_rx_resources(vsi->rx_rings[i]);
3406 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3407 * @ring: The Tx ring to configure
3409 * This enables/disables XPS for a given Tx descriptor ring
3410 * based on the TCs enabled for the VSI that ring belongs to.
3412 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3416 if (!ring->q_vector || !ring->netdev || ring->ch)
3419 /* We only initialize XPS once, so as not to overwrite user settings */
3420 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3423 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3424 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3429 * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled
3430 * @ring: The Tx or Rx ring
3432 * Returns the AF_XDP buffer pool or NULL.
3434 static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
3436 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3437 int qid = ring->queue_index;
3439 if (ring_is_xdp(ring))
3440 qid -= ring->vsi->alloc_queue_pairs;
3442 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3445 return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
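/* Illustrative sketch, not part of the driver: XDP Tx rings are stored after
 * the regular rings and share a queue id with their paired Rx ring, so the
 * pool lookup above first converts an XDP ring index back to that shared
 * queue id before consulting the zero-copy bitmap (hypothetical helper).
 */
static int example_pool_qid(int queue_index, bool is_xdp_ring,
			    u16 alloc_queue_pairs)
{
	return is_xdp_ring ? queue_index - alloc_queue_pairs : queue_index;
}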
3449 * i40e_configure_tx_ring - Configure a transmit ring context and related settings
3450 * @ring: The Tx ring to configure
3452 * Configure the Tx descriptor ring in the HMC context.
3454 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3456 struct i40e_vsi *vsi = ring->vsi;
3457 u16 pf_q = vsi->base_queue + ring->queue_index;
3458 struct i40e_hw *hw = &vsi->back->hw;
3459 struct i40e_hmc_obj_txq tx_ctx;
3463 if (ring_is_xdp(ring))
3464 ring->xsk_pool = i40e_xsk_pool(ring);
3466 /* some ATR related tx ring init */
3467 if (test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags)) {
3468 ring->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
3469 ring->atr_count = 0;
3471 ring->atr_sample_rate = 0;
3475 i40e_config_xps_tx_ring(ring);
3477 /* clear the context structure first */
3478 memset(&tx_ctx, 0, sizeof(tx_ctx));
3480 tx_ctx.new_context = 1;
3481 tx_ctx.base = (ring->dma / 128);
3482 tx_ctx.qlen = ring->count;
3483 if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags) ||
3484 test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags))
3486 if (test_bit(I40E_FLAG_PTP_ENA, vsi->back->flags))
3487 tx_ctx.timesync_ena = 1;
3488 /* FDIR VSI tx ring can still use RS bit and writebacks */
3489 if (vsi->type != I40E_VSI_FDIR)
3490 tx_ctx.head_wb_ena = 1;
3491 tx_ctx.head_wb_addr = ring->dma +
3492 (ring->count * sizeof(struct i40e_tx_desc));
3494 /* As part of VSI creation/update, FW allocates certain
3495 * Tx arbitration queue sets for each TC enabled for
3496 * the VSI. The FW returns the handles to these queue
3497 * sets as part of the response buffer to Add VSI,
3498 * Update VSI, etc. AQ commands. It is expected that
3499 * these queue set handles be associated with the Tx
3500 * queues by the driver as part of the TX queue context
3501 * initialization. This has to be done regardless of
3502 * DCB as by default everything is mapped to TC0.
3507 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3510 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3512 tx_ctx.rdylist_act = 0;
3514 /* clear the context in the HMC */
3515 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3517 dev_info(&vsi->back->pdev->dev,
3518 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3519 ring->queue_index, pf_q, err);
3523 /* set the context in the HMC */
3524 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3526 dev_info(&vsi->back->pdev->dev,
3527 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3528 ring->queue_index, pf_q, err);
3532 /* Now associate this queue with this PCI function */
3534 if (ring->ch->type == I40E_VSI_VMDQ2)
3535 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3539 qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
3540 ring->ch->vsi_number);
3542 if (vsi->type == I40E_VSI_VMDQ2) {
3543 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3544 qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
3547 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3551 qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id);
3552 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3555 /* cache the tail offset for easier writes later */
3556 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
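/* Illustrative sketch, not part of the driver: the head write-back address
 * programmed above points just past the descriptor ring, within the same DMA
 * allocation in this driver's setup, i.e. base + count * descriptor size
 * (hypothetical helper).
 */
static dma_addr_t example_head_wb_addr(dma_addr_t ring_dma, u16 count)
{
	return ring_dma + count * sizeof(struct i40e_tx_desc);
}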
3562 * i40e_rx_offset - Return expected offset into page to access data
3563 * @rx_ring: Ring we are requesting offset of
3565 * Returns the offset value for ring into the data buffer.
3567 static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
3569 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
3573 * i40e_configure_rx_ring - Configure a receive ring context
3574 * @ring: The Rx ring to configure
3576 * Configure the Rx descriptor ring in the HMC context.
3578 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3580 struct i40e_vsi *vsi = ring->vsi;
3581 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3582 u16 pf_q = vsi->base_queue + ring->queue_index;
3583 struct i40e_hw *hw = &vsi->back->hw;
3584 struct i40e_hmc_obj_rxq rx_ctx;
3588 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3590 /* clear the context structure first */
3591 memset(&rx_ctx, 0, sizeof(rx_ctx));
3593 ring->rx_buf_len = vsi->rx_buf_len;
3595 /* XDP RX-queue info only needed for RX rings exposed to XDP */
3596 if (ring->vsi->type != I40E_VSI_MAIN)
3599 if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
3600 err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
3602 ring->q_vector->napi.napi_id,
3608 ring->xsk_pool = i40e_xsk_pool(ring);
3609 if (ring->xsk_pool) {
3610 xdp_rxq_info_unreg(&ring->xdp_rxq);
3611 ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
3612 err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
3614 ring->q_vector->napi.napi_id,
3618 err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3619 MEM_TYPE_XSK_BUFF_POOL,
3623 dev_info(&vsi->back->pdev->dev,
3624 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
3628 err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3629 MEM_TYPE_PAGE_SHARED,
3636 xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
3638 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3639 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3641 rx_ctx.base = (ring->dma / 128);
3642 rx_ctx.qlen = ring->count;
3644 /* use 16 byte descriptors */
3647 /* descriptor type is always zero
3650 rx_ctx.hsplit_0 = 0;
3652 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3653 if (hw->revision_id == 0)
3654 rx_ctx.lrxqthresh = 0;
3656 rx_ctx.lrxqthresh = 1;
3657 rx_ctx.crcstrip = 1;
3659 /* this controls whether VLAN is stripped from inner headers */
3661 /* set the prefena field to 1 because the manual says to */
3664 /* clear the context in the HMC */
3665 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3667 dev_info(&vsi->back->pdev->dev,
3668 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3669 ring->queue_index, pf_q, err);
3673 /* set the context in the HMC */
3674 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3676 dev_info(&vsi->back->pdev->dev,
3677 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3678 ring->queue_index, pf_q, err);
3682 /* configure Rx buffer alignment */
3683 if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags)) {
3684 if (I40E_2K_TOO_SMALL_WITH_PADDING) {
3685 dev_info(&vsi->back->pdev->dev,
3686 "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
3689 clear_ring_build_skb_enabled(ring);
3691 set_ring_build_skb_enabled(ring);
3694 ring->rx_offset = i40e_rx_offset(ring);
3696 /* cache tail for quicker writes, and clear the reg before use */
3697 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3698 writel(0, ring->tail);
3700 if (ring->xsk_pool) {
3701 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
3702 ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
3704 ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3707 /* Log this in case the user has forgotten to give the kernel
3708 * any buffers, even later in the application.
3710 dev_info(&vsi->back->pdev->dev,
3711 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3712 ring->xsk_pool ? "AF_XDP ZC enabled " : "",
3713 ring->queue_index, pf_q);
3720 * i40e_vsi_configure_tx - Configure the VSI for Tx
3721 * @vsi: VSI structure describing this set of rings and resources
3723 * Configure the Tx VSI for operation.
3725 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3730 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3731 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3733 if (err || !i40e_enabled_xdp_vsi(vsi))
3736 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3737 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3743 * i40e_vsi_configure_rx - Configure the VSI for Rx
3744 * @vsi: the VSI being configured
3746 * Configure the Rx VSI for operation.
3748 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3753 vsi->max_frame = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog);
3754 vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
3756 #if (PAGE_SIZE < 8192)
3757 if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING &&
3758 vsi->netdev->mtu <= ETH_DATA_LEN) {
3759 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3760 vsi->max_frame = vsi->rx_buf_len;
3764 /* set up individual rings */
3765 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3766 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3772 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3773 * @vsi: ptr to the VSI
3775 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3777 struct i40e_ring *tx_ring, *rx_ring;
3778 u16 qoffset, qcount;
3781 if (!test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) {
3782 /* Reset the TC information */
3783 for (i = 0; i < vsi->num_queue_pairs; i++) {
3784 rx_ring = vsi->rx_rings[i];
3785 tx_ring = vsi->tx_rings[i];
3786 rx_ring->dcb_tc = 0;
3787 tx_ring->dcb_tc = 0;
3792 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3793 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3796 qoffset = vsi->tc_config.tc_info[n].qoffset;
3797 qcount = vsi->tc_config.tc_info[n].qcount;
3798 for (i = qoffset; i < (qoffset + qcount); i++) {
3799 rx_ring = vsi->rx_rings[i];
3800 tx_ring = vsi->tx_rings[i];
3801 rx_ring->dcb_tc = n;
3802 tx_ring->dcb_tc = n;
3808 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3809 * @vsi: ptr to the VSI
3811 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3814 i40e_set_rx_mode(vsi->netdev);
3818 * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
3819 * @pf: Pointer to the targeted PF
3821 * Set all flow director counters to 0.
3823 static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf)
3825 pf->fd_tcp4_filter_cnt = 0;
3826 pf->fd_udp4_filter_cnt = 0;
3827 pf->fd_sctp4_filter_cnt = 0;
3828 pf->fd_ip4_filter_cnt = 0;
3829 pf->fd_tcp6_filter_cnt = 0;
3830 pf->fd_udp6_filter_cnt = 0;
3831 pf->fd_sctp6_filter_cnt = 0;
3832 pf->fd_ip6_filter_cnt = 0;
3836 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3837 * @vsi: Pointer to the targeted VSI
3839 * This function replays, on the hardware, the hlist in which all the SB Flow
3840 * Director filters were saved.
3842 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3844 struct i40e_fdir_filter *filter;
3845 struct i40e_pf *pf = vsi->back;
3846 struct hlist_node *node;
3848 if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
3851 /* Reset FDir counters as we're replaying all existing filters */
3852 i40e_reset_fdir_filter_cnt(pf);
3854 hlist_for_each_entry_safe(filter, node,
3855 &pf->fdir_filter_list, fdir_node) {
3856 i40e_add_del_fdir(vsi, filter, true);
3861 * i40e_vsi_configure - Set up the VSI for action
3862 * @vsi: the VSI being configured
3864 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3868 i40e_set_vsi_rx_mode(vsi);
3869 i40e_restore_vlan(vsi);
3870 i40e_vsi_config_dcb_rings(vsi);
3871 err = i40e_vsi_configure_tx(vsi);
3873 err = i40e_vsi_configure_rx(vsi);
3879 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3880 * @vsi: the VSI being configured
3882 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3884 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3885 struct i40e_pf *pf = vsi->back;
3886 struct i40e_hw *hw = &pf->hw;
3891 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3892 * and PFINT_LNKLSTn registers, e.g.:
3893 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3895 qp = vsi->base_queue;
3896 vector = vsi->base_vector;
3897 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3898 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3900 q_vector->rx.next_update = jiffies + 1;
3901 q_vector->rx.target_itr =
3902 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3903 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3904 q_vector->rx.target_itr >> 1);
3905 q_vector->rx.current_itr = q_vector->rx.target_itr;
3907 q_vector->tx.next_update = jiffies + 1;
3908 q_vector->tx.target_itr =
3909 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3910 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3911 q_vector->tx.target_itr >> 1);
3912 q_vector->tx.current_itr = q_vector->tx.target_itr;
3914 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3915 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3917 /* start of the linked list for the RX queues assigned to this vector */
3918 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3919 for (q = 0; q < q_vector->num_ringpairs; q++) {
3920 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3923 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3924 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3925 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3926 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3927 (I40E_QUEUE_TYPE_TX <<
3928 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3930 wr32(hw, I40E_QINT_RQCTL(qp), val);
3933 /* TX queue with next queue set to TX */
3934 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3935 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3936 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3937 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3938 (I40E_QUEUE_TYPE_TX <<
3939 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3941 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3943 /* TX queue with next RX or end of linked list */
3944 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3945 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3946 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3947 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3948 (I40E_QUEUE_TYPE_RX <<
3949 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3951 /* Terminate the linked list */
3952 if (q == (q_vector->num_ringpairs - 1))
3953 val |= (I40E_QUEUE_END_OF_LIST <<
3954 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3956 wr32(hw, I40E_QINT_TQCTL(qp), val);
3965 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3966 * @pf: pointer to private device data structure
3968 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3970 struct i40e_hw *hw = &pf->hw;
3973 /* clear things first */
3974 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3975 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3977 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3978 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3979 I40E_PFINT_ICR0_ENA_GRST_MASK |
3980 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3981 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3982 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3983 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3984 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3986 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags))
3987 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3989 if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
3990 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3992 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3994 /* SW_ITR_IDX = 0, but don't change INTENA */
3995 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3996 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3998 /* OTHER_ITR_IDX = 0 */
3999 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
4003 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
4004 * @vsi: the VSI being configured
4006 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
4008 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
4009 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
4010 struct i40e_pf *pf = vsi->back;
4011 struct i40e_hw *hw = &pf->hw;
4013 /* set the ITR configuration */
4014 q_vector->rx.next_update = jiffies + 1;
4015 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
4016 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
4017 q_vector->rx.current_itr = q_vector->rx.target_itr;
4018 q_vector->tx.next_update = jiffies + 1;
4019 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
4020 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
4021 q_vector->tx.current_itr = q_vector->tx.target_itr;
4023 i40e_enable_misc_int_causes(pf);
4025 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
4026 wr32(hw, I40E_PFINT_LNKLST0, 0);
4028 /* Associate the queue pair with the vector and enable the queue
4029 * interrupt: the RX queue heads the linked list, with its next queue set to TX
4031 wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX));
4033 if (i40e_enabled_xdp_vsi(vsi)) {
4034 /* TX queue in linked list with next queue set to TX */
4035 wr32(hw, I40E_QINT_TQCTL(nextqp),
4036 I40E_QINT_TQCTL_VAL(nextqp, 0, TX));
4039 /* last TX queue so the next RX queue doesn't matter */
4040 wr32(hw, I40E_QINT_TQCTL(0),
4041 I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX));
4046 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
4047 * @pf: board private structure
4049 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
4051 struct i40e_hw *hw = &pf->hw;
4053 wr32(hw, I40E_PFINT_DYN_CTL0,
4054 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
4059 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
4060 * @pf: board private structure
4062 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
4064 struct i40e_hw *hw = &pf->hw;
4067 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
4068 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
4069 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
4071 wr32(hw, I40E_PFINT_DYN_CTL0, val);
4076 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
4077 * @irq: interrupt number
4078 * @data: pointer to a q_vector
4080 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
4082 struct i40e_q_vector *q_vector = data;
4084 if (!q_vector->tx.ring && !q_vector->rx.ring)
4087 napi_schedule_irqoff(&q_vector->napi);
4093 * i40e_irq_affinity_notify - Callback for affinity changes
4094 * @notify: context as to what irq was changed
4095 * @mask: the new affinity mask
4097 * This is a callback function used by the irq_set_affinity_notifier function
4098 * so that we may register to receive changes to the irq affinity masks.
4100 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
4101 const cpumask_t *mask)
4103 struct i40e_q_vector *q_vector =
4104 container_of(notify, struct i40e_q_vector, affinity_notify);
4106 cpumask_copy(&q_vector->affinity_mask, mask);
4110 * i40e_irq_affinity_release - Callback for affinity notifier release
4111 * @ref: internal core kernel usage
4113 * This is a callback function used by the irq_set_affinity_notifier function
4114 * to inform the current notification subscriber that they will no longer
4115 * receive notifications.
4117 static void i40e_irq_affinity_release(struct kref *ref) {}
4120 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
4121 * @vsi: the VSI being configured
4122 * @basename: name for the vector
4124 * Allocates MSI-X vectors and requests interrupts from the kernel.
4126 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
4128 int q_vectors = vsi->num_q_vectors;
4129 struct i40e_pf *pf = vsi->back;
4130 int base = vsi->base_vector;
4137 for (vector = 0; vector < q_vectors; vector++) {
4138 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
4140 irq_num = pf->msix_entries[base + vector].vector;
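/* build a descriptive vector name based on the ring types it services */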
4142 if (q_vector->tx.ring && q_vector->rx.ring) {
4143 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
4144 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
4146 } else if (q_vector->rx.ring) {
4147 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
4148 "%s-%s-%d", basename, "rx", rx_int_idx++);
4149 } else if (q_vector->tx.ring) {
4150 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
4151 "%s-%s-%d", basename, "tx", tx_int_idx++);
4153 /* skip this unused q_vector */
4156 err = request_irq(irq_num,
4162 dev_info(&pf->pdev->dev,
4163 "MSIX request_irq failed, error: %d\n", err);
4164 goto free_queue_irqs;
4167 /* register for affinity change notifications */
4168 q_vector->irq_num = irq_num;
4169 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
4170 q_vector->affinity_notify.release = i40e_irq_affinity_release;
4171 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
4172 /* Spread affinity hints out across online CPUs.
4174 * get_cpu_mask returns a static constant mask with
4175 * a permanent lifetime so it's ok to pass to
4176 * irq_update_affinity_hint without making a copy.
4178 cpu = cpumask_local_spread(q_vector->v_idx, -1);
4179 irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
4182 vsi->irqs_ready = true;
4188 irq_num = pf->msix_entries[base + vector].vector;
4189 irq_set_affinity_notifier(irq_num, NULL);
4190 irq_update_affinity_hint(irq_num, NULL);
4191 free_irq(irq_num, &vsi->q_vectors[vector]);
4197 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
4198 * @vsi: the VSI being un-configured
4200 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
4202 struct i40e_pf *pf = vsi->back;
4203 struct i40e_hw *hw = &pf->hw;
4204 int base = vsi->base_vector;
4207 /* disable interrupt causation from each queue */
4208 for (i = 0; i < vsi->num_queue_pairs; i++) {
4211 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
4212 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
4213 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
4215 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
4216 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
4217 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
4219 if (!i40e_enabled_xdp_vsi(vsi))
4221 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
4224 /* disable each interrupt */
4225 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
4226 for (i = vsi->base_vector;
4227 i < (vsi->num_q_vectors + vsi->base_vector); i++)
4228 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
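/* wait for any in-flight handlers on these vectors to finish */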
4231 for (i = 0; i < vsi->num_q_vectors; i++)
4232 synchronize_irq(pf->msix_entries[i + base].vector);
4234 /* Legacy and MSI mode - this stops all interrupt handling */
4235 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
4236 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
4238 synchronize_irq(pf->pdev->irq);
4243 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
4244 * @vsi: the VSI being configured
4246 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
4248 struct i40e_pf *pf = vsi->back;
4251 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
4252 for (i = 0; i < vsi->num_q_vectors; i++)
4253 i40e_irq_dynamic_enable(vsi, i);
4255 i40e_irq_dynamic_enable_icr0(pf);
4258 i40e_flush(&pf->hw);
4263 * i40e_free_misc_vector - Free the vector that handles non-queue events
4264 * @pf: board private structure
4266 static void i40e_free_misc_vector(struct i40e_pf *pf)
4269 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
4270 i40e_flush(&pf->hw);
4272 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
4273 free_irq(pf->msix_entries[0].vector, pf);
4274 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
4279 * i40e_intr - MSI/Legacy and non-queue interrupt handler
4280 * @irq: interrupt number
4281 * @data: pointer to a q_vector
4283 * This is the handler used for all MSI/Legacy interrupts, and deals
4284 * with both queue and non-queue interrupts. This is also used in
4285 * MSIX mode to handle the non-queue interrupts.
4287 static irqreturn_t i40e_intr(int irq, void *data)
4289 struct i40e_pf *pf = (struct i40e_pf *)data;
4290 struct i40e_hw *hw = &pf->hw;
4291 irqreturn_t ret = IRQ_NONE;
4292 u32 icr0, icr0_remaining;
4295 icr0 = rd32(hw, I40E_PFINT_ICR0);
4296 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
4298 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
4299 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
4302 /* if interrupt but no bits showing, must be SWINT */
4303 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
4304 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
4307 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) &&
4308 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
4309 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
4310 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
4311 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
4314 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
4315 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
4316 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
4317 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
/* We do not have a way to disarm queue causes while leaving
* the interrupt enabled for all other causes. Ideally the
* interrupt should be disabled while we are in NAPI, but
* this is not a performance path and napi_schedule()
* can deal with rescheduling.
4325 if (!test_bit(__I40E_DOWN, pf->state))
4326 napi_schedule_irqoff(&q_vector->napi);
4329 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4330 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4331 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
4332 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
4335 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
4336 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4337 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
4340 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4341 /* disable any further VFLR event notifications */
4342 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
4343 u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4345 reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
4346 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4348 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
4349 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4353 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
4354 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4355 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
4356 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
4357 val = rd32(hw, I40E_GLGEN_RSTAT);
4358 val = FIELD_GET(I40E_GLGEN_RSTAT_RESET_TYPE_MASK, val);
4359 if (val == I40E_RESET_CORER) {
4361 } else if (val == I40E_RESET_GLOBR) {
4363 } else if (val == I40E_RESET_EMPR) {
4365 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4369 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4370 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4371 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4372 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4373 rd32(hw, I40E_PFHMC_ERRORINFO),
4374 rd32(hw, I40E_PFHMC_ERRORDATA));
4377 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4378 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4380 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK)
4381 schedule_work(&pf->ptp_extts0_work);
4383 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
4384 i40e_ptp_tx_hwtstamp(pf);
4386 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
/* If a critical error is pending we have no choice but to reset the
* device.
4391 * Report and mask out any remaining unexpected interrupts.
4393 icr0_remaining = icr0 & ena_mask;
4394 if (icr0_remaining) {
4395 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4397 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4398 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4399 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4400 dev_info(&pf->pdev->dev, "device will be reset\n");
4401 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4402 i40e_service_event_schedule(pf);
4404 ena_mask &= ~icr0_remaining;
4409 /* re-enable interrupt causes */
4410 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4411 if (!test_bit(__I40E_DOWN, pf->state) ||
4412 test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4413 i40e_service_event_schedule(pf);
4414 i40e_irq_dynamic_enable_icr0(pf);
4421 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4422 * @tx_ring: tx ring to clean
4423 * @budget: how many cleans we're allowed
* Returns true if there's any budget left (i.e. the clean is finished)
4427 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4429 struct i40e_vsi *vsi = tx_ring->vsi;
4430 u16 i = tx_ring->next_to_clean;
4431 struct i40e_tx_buffer *tx_buf;
4432 struct i40e_tx_desc *tx_desc;
4434 tx_buf = &tx_ring->tx_bi[i];
4435 tx_desc = I40E_TX_DESC(tx_ring, i);
4436 i -= tx_ring->count;
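/* i is biased negative by the ring size so the wrap check below is a simple test for zero */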
4439 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4441 /* if next_to_watch is not set then there is no work pending */
4445 /* prevent any other reads prior to eop_desc */
4448 /* if the descriptor isn't done, no work yet to do */
4449 if (!(eop_desc->cmd_type_offset_bsz &
4450 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4453 /* clear next_to_watch to prevent false hangs */
4454 tx_buf->next_to_watch = NULL;
4456 tx_desc->buffer_addr = 0;
4457 tx_desc->cmd_type_offset_bsz = 0;
4458 /* move past filter desc */
4463 i -= tx_ring->count;
4464 tx_buf = tx_ring->tx_bi;
4465 tx_desc = I40E_TX_DESC(tx_ring, 0);
4467 /* unmap skb header data */
4468 dma_unmap_single(tx_ring->dev,
4469 dma_unmap_addr(tx_buf, dma),
4470 dma_unmap_len(tx_buf, len),
4472 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4473 kfree(tx_buf->raw_buf);
4475 tx_buf->raw_buf = NULL;
4476 tx_buf->tx_flags = 0;
4477 tx_buf->next_to_watch = NULL;
4478 dma_unmap_len_set(tx_buf, len, 0);
4479 tx_desc->buffer_addr = 0;
4480 tx_desc->cmd_type_offset_bsz = 0;
4482 /* move us past the eop_desc for start of next FD desc */
4487 i -= tx_ring->count;
4488 tx_buf = tx_ring->tx_bi;
4489 tx_desc = I40E_TX_DESC(tx_ring, 0);
4492 /* update budget accounting */
4494 } while (likely(budget));
4496 i += tx_ring->count;
4497 tx_ring->next_to_clean = i;
4499 if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags))
4500 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4506 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4507 * @irq: interrupt number
4508 * @data: pointer to a q_vector
4510 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4512 struct i40e_q_vector *q_vector = data;
4513 struct i40e_vsi *vsi;
4515 if (!q_vector->tx.ring)
4518 vsi = q_vector->tx.ring->vsi;
4519 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4525 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4526 * @vsi: the VSI being configured
4527 * @v_idx: vector index
4528 * @qp_idx: queue pair index
4530 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4532 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4533 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4534 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
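/* rings are pushed onto the head of the vector's singly linked ring lists */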
4536 tx_ring->q_vector = q_vector;
4537 tx_ring->next = q_vector->tx.ring;
4538 q_vector->tx.ring = tx_ring;
4539 q_vector->tx.count++;
4541 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4542 if (i40e_enabled_xdp_vsi(vsi)) {
4543 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4545 xdp_ring->q_vector = q_vector;
4546 xdp_ring->next = q_vector->tx.ring;
4547 q_vector->tx.ring = xdp_ring;
4548 q_vector->tx.count++;
4551 rx_ring->q_vector = q_vector;
4552 rx_ring->next = q_vector->rx.ring;
4553 q_vector->rx.ring = rx_ring;
4554 q_vector->rx.count++;
4558 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4559 * @vsi: the VSI being configured
4561 * This function maps descriptor rings to the queue-specific vectors
4562 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4563 * one vector per queue pair, but on a constrained vector budget, we
4564 * group the queue pairs as "efficiently" as possible.
4566 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4568 int qp_remaining = vsi->num_queue_pairs;
4569 int q_vectors = vsi->num_q_vectors;
4574 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4575 * group them so there are multiple queues per vector.
4576 * It is also important to go through all the vectors available to be
* sure that if we don't use all the vectors, the remaining vectors
4578 * are cleared. This is especially important when decreasing the
4579 * number of queues in use.
4581 for (; v_start < q_vectors; v_start++) {
4582 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
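/* split the remaining queue pairs evenly across the remaining vectors */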
4584 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4586 q_vector->num_ringpairs = num_ringpairs;
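/* the DYN_CTLN/LNKLSTN register arrays skip the misc vector 0, hence the -1 */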
4587 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4589 q_vector->rx.count = 0;
4590 q_vector->tx.count = 0;
4591 q_vector->rx.ring = NULL;
4592 q_vector->tx.ring = NULL;
4594 while (num_ringpairs--) {
4595 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4603 * i40e_vsi_request_irq - Request IRQ from the OS
4604 * @vsi: the VSI being configured
4605 * @basename: name for the vector
4607 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4609 struct i40e_pf *pf = vsi->back;
4612 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
4613 err = i40e_vsi_request_irq_msix(vsi, basename);
4614 else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags))
4615 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4618 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4622 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4627 #ifdef CONFIG_NET_POLL_CONTROLLER
4629 * i40e_netpoll - A Polling 'interrupt' handler
4630 * @netdev: network interface device structure
4632 * This is used by netconsole to send skbs without having to re-enable
4633 * interrupts. It's not called while the normal interrupt routine is executing.
4635 static void i40e_netpoll(struct net_device *netdev)
4637 struct i40e_netdev_priv *np = netdev_priv(netdev);
4638 struct i40e_vsi *vsi = np->vsi;
4639 struct i40e_pf *pf = vsi->back;
4642 /* if interface is down do nothing */
4643 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4646 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
4647 for (i = 0; i < vsi->num_q_vectors; i++)
4648 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4650 i40e_intr(pf->pdev->irq, netdev);
4655 #define I40E_QTX_ENA_WAIT_COUNT 50
4658 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4659 * @pf: the PF being configured
4660 * @pf_q: the PF queue
4661 * @enable: enable or disable state of the queue
4663 * This routine will wait for the given Tx queue of the PF to reach the
4664 * enabled or disabled state.
* Returns -ETIMEDOUT if the queue fails to reach the requested state after
* multiple retries; otherwise returns 0 on success.
4668 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4673 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4674 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4675 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4678 usleep_range(10, 20);
4680 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4687 * i40e_control_tx_q - Start or stop a particular Tx queue
4688 * @pf: the PF structure
4689 * @pf_q: the PF queue to configure
4690 * @enable: start or stop the queue
4692 * This function enables or disables a single queue. Note that any delay
* required after the operation is expected to be handled by the caller of
* this function.
4696 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4698 struct i40e_hw *hw = &pf->hw;
4702 /* warn the TX unit of coming changes */
4703 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4705 usleep_range(10, 20);
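/* wait for the QENA request and status bits to agree before making changes */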
4707 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4708 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4709 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4710 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4712 usleep_range(1000, 2000);
4715 /* Skip if the queue is already in the requested state */
4716 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4719 /* turn on/off the queue */
4721 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4722 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4724 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4727 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4731 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
* @seid: VSI SEID
* @pf: the PF structure
4734 * @pf_q: the PF queue to configure
4735 * @is_xdp: true if the queue is used for XDP
4736 * @enable: start or stop the queue
4738 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4739 bool is_xdp, bool enable)
4743 i40e_control_tx_q(pf, pf_q, enable);
4745 /* wait for the change to finish */
4746 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4748 dev_info(&pf->pdev->dev,
4749 "VSI seid %d %sTx ring %d %sable timeout\n",
4750 seid, (is_xdp ? "XDP " : ""), pf_q,
4751 (enable ? "en" : "dis"));
4758 * i40e_vsi_enable_tx - Start a VSI's rings
4759 * @vsi: the VSI being configured
4761 static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
4763 struct i40e_pf *pf = vsi->back;
4764 int i, pf_q, ret = 0;
4766 pf_q = vsi->base_queue;
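/* XDP Tx queues, if present, are located alloc_queue_pairs after the regular Tx queues */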
4767 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4768 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4770 false /*is xdp*/, true);
4774 if (!i40e_enabled_xdp_vsi(vsi))
4777 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4778 pf_q + vsi->alloc_queue_pairs,
4779 true /*is xdp*/, true);
4787 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4788 * @pf: the PF being configured
4789 * @pf_q: the PF queue
4790 * @enable: enable or disable state of the queue
4792 * This routine will wait for the given Rx queue of the PF to reach the
4793 * enabled or disabled state.
* Returns -ETIMEDOUT if the queue fails to reach the requested state after
* multiple retries; otherwise returns 0 on success.
4797 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4802 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4803 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4804 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4807 usleep_range(10, 20);
4809 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4816 * i40e_control_rx_q - Start or stop a particular Rx queue
4817 * @pf: the PF structure
4818 * @pf_q: the PF queue to configure
4819 * @enable: start or stop the queue
4821 * This function enables or disables a single queue. Note that
4822 * any delay required after the operation is expected to be
4823 * handled by the caller of this function.
4825 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4827 struct i40e_hw *hw = &pf->hw;
4831 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4832 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4833 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4834 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4836 usleep_range(1000, 2000);
4839 /* Skip if the queue is already in the requested state */
4840 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4843 /* turn on/off the queue */
4845 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4847 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4849 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
* i40e_control_wait_rx_q - Start/stop Rx queue and wait for completion
4854 * @pf: the PF structure
4855 * @pf_q: queue being configured
4856 * @enable: start or stop the rings
4858 * This function enables or disables a single queue along with waiting
4859 * for the change to finish. The caller of this function should handle
4860 * the delays needed in the case of disabling queues.
4862 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4866 i40e_control_rx_q(pf, pf_q, enable);
4868 /* wait for the change to finish */
4869 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4877 * i40e_vsi_enable_rx - Start a VSI's rings
4878 * @vsi: the VSI being configured
4880 static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
4882 struct i40e_pf *pf = vsi->back;
4883 int i, pf_q, ret = 0;
4885 pf_q = vsi->base_queue;
4886 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4887 ret = i40e_control_wait_rx_q(pf, pf_q, true);
4889 dev_info(&pf->pdev->dev,
4890 "VSI seid %d Rx ring %d enable timeout\n",
4900 * i40e_vsi_start_rings - Start a VSI's rings
4901 * @vsi: the VSI being configured
4903 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4907 /* do rx first for enable and last for disable */
4908 ret = i40e_vsi_enable_rx(vsi);
4911 ret = i40e_vsi_enable_tx(vsi);
4916 #define I40E_DISABLE_TX_GAP_MSEC 50
4919 * i40e_vsi_stop_rings - Stop a VSI's rings
4920 * @vsi: the VSI being configured
4922 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4924 struct i40e_pf *pf = vsi->back;
4925 u32 pf_q, tx_q_end, rx_q_end;
4927 /* When port TX is suspended, don't wait */
4928 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4929 return i40e_vsi_stop_rings_no_wait(vsi);
4931 tx_q_end = vsi->base_queue +
4932 vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
4933 for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++)
4934 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, false);
4936 rx_q_end = vsi->base_queue + vsi->num_queue_pairs;
4937 for (pf_q = vsi->base_queue; pf_q < rx_q_end; pf_q++)
4938 i40e_control_rx_q(pf, pf_q, false);
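/* allow outstanding Tx work time to drain before forcing the Tx queues off */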
4940 msleep(I40E_DISABLE_TX_GAP_MSEC);
4941 for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++)
4942 wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
4944 i40e_vsi_wait_queues_disabled(vsi);
4948 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4949 * @vsi: the VSI being shutdown
4951 * This function stops all the rings for a VSI but does not delay to verify
4952 * that rings have been disabled. It is expected that the caller is shutting
4953 * down multiple VSIs at once and will delay together for all the VSIs after
4954 * initiating the shutdown. This is particularly useful for shutting down lots
4955 * of VFs together. Otherwise, a large delay can be incurred while configuring
4956 * each VSI in serial.
4958 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4960 struct i40e_pf *pf = vsi->back;
4963 pf_q = vsi->base_queue;
4964 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4965 i40e_control_tx_q(pf, pf_q, false);
4966 i40e_control_rx_q(pf, pf_q, false);
4971 * i40e_vsi_free_irq - Free the irq association with the OS
4972 * @vsi: the VSI being configured
4974 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4976 struct i40e_pf *pf = vsi->back;
4977 struct i40e_hw *hw = &pf->hw;
4978 int base = vsi->base_vector;
4982 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
4983 if (!vsi->q_vectors)
4986 if (!vsi->irqs_ready)
4989 vsi->irqs_ready = false;
4990 for (i = 0; i < vsi->num_q_vectors; i++) {
4995 irq_num = pf->msix_entries[vector].vector;
4997 /* free only the irqs that were actually requested */
4998 if (!vsi->q_vectors[i] ||
4999 !vsi->q_vectors[i]->num_ringpairs)
5002 /* clear the affinity notifier in the IRQ descriptor */
5003 irq_set_affinity_notifier(irq_num, NULL);
5004 /* remove our suggested affinity mask for this IRQ */
5005 irq_update_affinity_hint(irq_num, NULL);
5006 free_irq(irq_num, vsi->q_vectors[i]);
5008 /* Tear down the interrupt queue link list
5010 * We know that they come in pairs and always
5011 * the Rx first, then the Tx. To clear the
5012 * link list, stick the EOL value into the
5013 * next_q field of the registers.
5015 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
5016 qp = FIELD_GET(I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK,
5018 val |= I40E_QUEUE_END_OF_LIST
5019 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
5020 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
5022 while (qp != I40E_QUEUE_END_OF_LIST) {
5025 val = rd32(hw, I40E_QINT_RQCTL(qp));
5027 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
5028 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
5029 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5030 I40E_QINT_RQCTL_INTEVENT_MASK);
5032 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
5033 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
5035 wr32(hw, I40E_QINT_RQCTL(qp), val);
5037 val = rd32(hw, I40E_QINT_TQCTL(qp));
5039 next = FIELD_GET(I40E_QINT_TQCTL_NEXTQ_INDX_MASK,
5042 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
5043 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
5044 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
5045 I40E_QINT_TQCTL_INTEVENT_MASK);
5047 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
5048 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
5050 wr32(hw, I40E_QINT_TQCTL(qp), val);
5055 free_irq(pf->pdev->irq, pf);
5057 val = rd32(hw, I40E_PFINT_LNKLST0);
5058 qp = FIELD_GET(I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK, val);
5059 val |= I40E_QUEUE_END_OF_LIST
5060 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5061 wr32(hw, I40E_PFINT_LNKLST0, val);
5063 val = rd32(hw, I40E_QINT_RQCTL(qp));
5064 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
5065 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
5066 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5067 I40E_QINT_RQCTL_INTEVENT_MASK);
5069 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
5070 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
5072 wr32(hw, I40E_QINT_RQCTL(qp), val);
5074 val = rd32(hw, I40E_QINT_TQCTL(qp));
5076 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
5077 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
5078 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
5079 I40E_QINT_TQCTL_INTEVENT_MASK);
5081 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
5082 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
5084 wr32(hw, I40E_QINT_TQCTL(qp), val);
5089 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
5090 * @vsi: the VSI being configured
5091 * @v_idx: Index of vector to be freed
5093 * This function frees the memory allocated to the q_vector. In addition if
5094 * NAPI is enabled it will delete any references to the NAPI struct prior
5095 * to freeing the q_vector.
5097 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
5099 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
5100 struct i40e_ring *ring;
5105 /* disassociate q_vector from rings */
5106 i40e_for_each_ring(ring, q_vector->tx)
5107 ring->q_vector = NULL;
5109 i40e_for_each_ring(ring, q_vector->rx)
5110 ring->q_vector = NULL;
5112 /* only VSI w/ an associated netdev is set up w/ NAPI */
5114 netif_napi_del(&q_vector->napi);
5116 vsi->q_vectors[v_idx] = NULL;
5118 kfree_rcu(q_vector, rcu);
5122 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
5123 * @vsi: the VSI being un-configured
5125 * This frees the memory allocated to the q_vectors and
5126 * deletes references to the NAPI struct.
5128 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
5132 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
5133 i40e_free_q_vector(vsi, v_idx);
5137 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
5138 * @pf: board private structure
5140 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
5142 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
5143 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
5144 pci_disable_msix(pf->pdev);
5145 kfree(pf->msix_entries);
5146 pf->msix_entries = NULL;
5147 kfree(pf->irq_pile);
5148 pf->irq_pile = NULL;
5149 } else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags)) {
5150 pci_disable_msi(pf->pdev);
5152 clear_bit(I40E_FLAG_MSI_ENA, pf->flags);
5153 clear_bit(I40E_FLAG_MSIX_ENA, pf->flags);
5157 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
5158 * @pf: board private structure
5160 * We go through and clear interrupt specific resources and reset the structure
5161 * to pre-load conditions
5163 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
5165 struct i40e_vsi *vsi;
5168 if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
5169 i40e_free_misc_vector(pf);
5171 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
5172 I40E_IWARP_IRQ_PILE_ID);
5174 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
5176 i40e_pf_for_each_vsi(pf, i, vsi)
5177 i40e_vsi_free_q_vectors(vsi);
5179 i40e_reset_interrupt_capability(pf);
5183 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5184 * @vsi: the VSI being configured
5186 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
5193 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
5194 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
5196 if (q_vector->rx.ring || q_vector->tx.ring)
5197 napi_enable(&q_vector->napi);
5202 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5203 * @vsi: the VSI being configured
5205 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
5212 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
5213 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
5215 if (q_vector->rx.ring || q_vector->tx.ring)
5216 napi_disable(&q_vector->napi);
5221 * i40e_vsi_close - Shut down a VSI
5222 * @vsi: the vsi to be quelled
5224 static void i40e_vsi_close(struct i40e_vsi *vsi)
5226 struct i40e_pf *pf = vsi->back;
5227 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
5229 i40e_vsi_free_irq(vsi);
5230 i40e_vsi_free_tx_resources(vsi);
5231 i40e_vsi_free_rx_resources(vsi);
5232 vsi->current_netdev_flags = 0;
5233 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
5234 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
5235 set_bit(__I40E_CLIENT_RESET, pf->state);
5239 * i40e_quiesce_vsi - Pause a given VSI
5240 * @vsi: the VSI being paused
5242 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
5244 if (test_bit(__I40E_VSI_DOWN, vsi->state))
5247 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
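/* stop a running netdev through its own ndo_stop so the stack state stays consistent */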
5248 if (vsi->netdev && netif_running(vsi->netdev))
5249 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
5251 i40e_vsi_close(vsi);
5255 * i40e_unquiesce_vsi - Resume a given VSI
5256 * @vsi: the VSI being resumed
5258 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
5260 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
5263 if (vsi->netdev && netif_running(vsi->netdev))
5264 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
5266 i40e_vsi_open(vsi); /* this clears the DOWN bit */
5270 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
5273 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
5275 struct i40e_vsi *vsi;
5278 i40e_pf_for_each_vsi(pf, v, vsi)
5279 i40e_quiesce_vsi(vsi);
5283 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
5286 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
5288 struct i40e_vsi *vsi;
5291 i40e_pf_for_each_vsi(pf, v, vsi)
5292 i40e_unquiesce_vsi(vsi);
5296 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
5297 * @vsi: the VSI being configured
5299 * Wait until all queues on a given VSI have been disabled.
5301 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
5303 struct i40e_pf *pf = vsi->back;
5306 pf_q = vsi->base_queue;
5307 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
5308 /* Check and wait for the Tx queue */
5309 ret = i40e_pf_txq_wait(pf, pf_q, false);
5311 dev_info(&pf->pdev->dev,
5312 "VSI seid %d Tx ring %d disable timeout\n",
5317 if (!i40e_enabled_xdp_vsi(vsi))
5320 /* Check and wait for the XDP Tx queue */
5321 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
5324 dev_info(&pf->pdev->dev,
5325 "VSI seid %d XDP Tx ring %d disable timeout\n",
5330 /* Check and wait for the Rx queue */
5331 ret = i40e_pf_rxq_wait(pf, pf_q, false);
5333 dev_info(&pf->pdev->dev,
5334 "VSI seid %d Rx ring %d disable timeout\n",
5343 #ifdef CONFIG_I40E_DCB
5345 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
5348 * This function waits for the queues to be in disabled state for all the
5349 * VSIs that are managed by this PF.
5351 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
5353 struct i40e_vsi *vsi;
5356 i40e_pf_for_each_vsi(pf, v, vsi) {
5357 ret = i40e_vsi_wait_queues_disabled(vsi);
5368 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
5369 * @pf: pointer to PF
* Get the TC map for an iSCSI PF type; the map will include the iSCSI TC.
5374 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
5376 struct i40e_dcb_app_priority_table app;
5377 struct i40e_hw *hw = &pf->hw;
5378 u8 enabled_tc = 1; /* TC0 is always enabled */
5380 /* Get the iSCSI APP TLV */
5381 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5383 for (i = 0; i < dcbcfg->numapps; i++) {
5384 app = dcbcfg->app[i];
5385 if (app.selector == I40E_APP_SEL_TCPIP &&
5386 app.protocolid == I40E_APP_PROTOID_ISCSI) {
5387 tc = dcbcfg->etscfg.prioritytable[app.priority];
5388 enabled_tc |= BIT(tc);
5397 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
5398 * @dcbcfg: the corresponding DCBx configuration structure
5400 * Return the number of TCs from given DCBx configuration
5402 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5404 int i, tc_unused = 0;
/* Scan the ETS Config Priority Table to find the
* traffic class enabled for each priority
* and create a bitmask of enabled TCs
5412 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5413 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5415 /* Now scan the bitmask to check for
5416 * contiguous TCs starting with TC0
5418 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5419 if (num_tc & BIT(i)) {
5423 pr_err("Non-contiguous TC - Disabling DCB\n");
5431 /* There is always at least TC0 */
5439 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5440 * @dcbcfg: the corresponding DCBx configuration structure
5442 * Query the current DCB configuration and return the number of
5443 * traffic classes enabled from the given DCBX config
5445 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5447 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5451 for (i = 0; i < num_tc; i++)
5452 enabled_tc |= BIT(i);
5458 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5459 * @pf: PF being queried
5461 * Query the current MQPRIO configuration and return the number of
5462 * traffic classes enabled.
5464 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5466 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5467 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5468 u8 enabled_tc = 1, i;
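/* TC0 is always enabled, so start the loop at TC1 */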
5470 for (i = 1; i < num_tc; i++)
5471 enabled_tc |= BIT(i);
5476 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5477 * @pf: PF being queried
5479 * Return number of traffic classes enabled for the given PF
5481 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5483 struct i40e_hw *hw = &pf->hw;
5484 u8 i, enabled_tc = 1;
5486 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5488 if (i40e_is_tc_mqprio_enabled(pf))
5489 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5491 /* If neither MQPRIO nor DCB is enabled, then always use single TC */
5492 if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
5495 /* SFP mode will be enabled for all TCs on port */
5496 if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
5497 return i40e_dcb_get_num_tc(dcbcfg);
5499 /* MFP mode return count of enabled TCs for this PF */
5500 if (pf->hw.func_caps.iscsi)
5501 enabled_tc = i40e_get_iscsi_tc_map(pf);
5503 return 1; /* Only TC0 */
5505 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5506 if (enabled_tc & BIT(i))
5513 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5514 * @pf: PF being queried
5516 * Return a bitmap for enabled traffic classes for this PF.
5518 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5520 if (i40e_is_tc_mqprio_enabled(pf))
5521 return i40e_mqprio_get_enabled_tc(pf);
/* If neither MQPRIO nor DCB is enabled for this PF then just return
* the default TC0 bitmap.
5526 if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
5527 return I40E_DEFAULT_TRAFFIC_CLASS;
5529 /* SFP mode we want PF to be enabled for all TCs */
5530 if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
5531 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5533 /* MFP enabled and iSCSI PF type */
5534 if (pf->hw.func_caps.iscsi)
5535 return i40e_get_iscsi_tc_map(pf);
5537 return I40E_DEFAULT_TRAFFIC_CLASS;
5541 * i40e_vsi_get_bw_info - Query VSI BW Information
5542 * @vsi: the VSI being queried
5544 * Returns 0 on success, negative value on failure
5546 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5548 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5549 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5550 struct i40e_pf *pf = vsi->back;
5551 struct i40e_hw *hw = &pf->hw;
5556 /* Get the VSI level BW configuration */
5557 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5559 dev_info(&pf->pdev->dev,
5560 "couldn't get PF vsi bw config, err %pe aq_err %s\n",
5562 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5566 /* Get the VSI level BW configuration per TC */
5567 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5570 dev_info(&pf->pdev->dev,
5571 "couldn't get PF vsi ets bw config, err %pe aq_err %s\n",
5573 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5577 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5578 dev_info(&pf->pdev->dev,
5579 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5580 bw_config.tc_valid_bits,
5581 bw_ets_config.tc_valid_bits);
5582 /* Still continuing */
5585 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5586 vsi->bw_max_quanta = bw_config.max_bw;
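/* per-TC max quanta are packed 4 bits per TC across the two 16-bit tc_bw_max words */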
5587 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5588 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5589 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5590 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5591 vsi->bw_ets_limit_credits[i] =
5592 le16_to_cpu(bw_ets_config.credits[i]);
5593 /* 3 bits out of 4 for each TC */
5594 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5601 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5602 * @vsi: the VSI being configured
5603 * @enabled_tc: TC bitmap
5604 * @bw_share: BW shared credits per TC
5606 * Returns 0 on success, negative value on failure
5608 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5611 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5612 struct i40e_pf *pf = vsi->back;
5616 /* There is no need to reset BW when mqprio mode is on. */
5617 if (i40e_is_tc_mqprio_enabled(pf))
5619 if (!vsi->mqprio_qopt.qopt.hw && !test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
5620 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5622 dev_info(&pf->pdev->dev,
5623 "Failed to reset tx rate for vsi->seid %u\n",
5627 memset(&bw_data, 0, sizeof(bw_data));
5628 bw_data.tc_valid_bits = enabled_tc;
5629 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5630 bw_data.tc_bw_credits[i] = bw_share[i];
5632 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5634 dev_info(&pf->pdev->dev,
5635 "AQ command Config VSI BW allocation per TC failed = %d\n",
5636 pf->hw.aq.asq_last_status);
5640 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5641 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5647 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5648 * @vsi: the VSI being configured
5649 * @enabled_tc: TC map to be enabled
5652 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5654 struct net_device *netdev = vsi->netdev;
5655 struct i40e_pf *pf = vsi->back;
5656 struct i40e_hw *hw = &pf->hw;
5659 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5665 netdev_reset_tc(netdev);
5669 /* Set up actual enabled TCs on the VSI */
5670 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5673 /* set per TC queues for the VSI */
5674 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
/* Only set TC queues for enabled TCs
*
* e.g. for a VSI that has TC0 and TC3 enabled the
* enabled_tc bitmap would be 0x9; the driver
* will set numtc for the netdev to 2, which the
* netdev layer will reference as TC 0 and 1.
5682 if (vsi->tc_config.enabled_tc & BIT(i))
5683 netdev_set_tc_queue(netdev,
5684 vsi->tc_config.tc_info[i].netdev_tc,
5685 vsi->tc_config.tc_info[i].qcount,
5686 vsi->tc_config.tc_info[i].qoffset);
5689 if (i40e_is_tc_mqprio_enabled(pf))
5692 /* Assign UP2TC map for the VSI */
5693 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5694 /* Get the actual TC# for the UP */
5695 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5696 /* Get the mapped netdev TC# for the UP */
5697 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5698 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5703 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
5704 * @vsi: the VSI being configured
5705 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5707 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5708 struct i40e_vsi_context *ctxt)
/* copy just the sections touched, not the entire info,
* since not all sections are valid as returned by
* update vsi params
5714 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5715 memcpy(&vsi->info.queue_mapping,
5716 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5717 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5718 sizeof(vsi->info.tc_mapping));
5722 * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
5723 * @vsi: the VSI being reconfigured
5724 * @vsi_offset: offset from main VF VSI
5726 int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
5728 struct i40e_vsi_context ctxt = {};
5738 ctxt.seid = vsi->seid;
5739 ctxt.pf_num = hw->pf_id;
5740 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset;
5741 ctxt.uplink_seid = vsi->uplink_seid;
5742 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5743 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5744 ctxt.info = vsi->info;
5746 i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc,
5748 if (vsi->reconfig_rss) {
5749 vsi->rss_size = min_t(int, pf->alloc_rss_size,
5750 vsi->num_queue_pairs);
5751 ret = i40e_vsi_config_rss(vsi);
5753 dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n");
5756 vsi->reconfig_rss = false;
5759 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5761 dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n",
5763 i40e_aq_str(hw, hw->aq.asq_last_status));
5766 /* update the local VSI info with updated queue map */
5767 i40e_vsi_update_queue_map(vsi, &ctxt);
5768 vsi->info.valid_sections = 0;
5774 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5775 * @vsi: VSI to be configured
5776 * @enabled_tc: TC bitmap
5778 * This configures a particular VSI for TCs that are mapped to the
5779 * given TC bitmap. It uses default bandwidth share for TCs across
5780 * VSIs to configure TC for a particular VSI.
* It is expected that the VSI queues have been quiesced before calling
* this function.
5786 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5788 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5789 struct i40e_pf *pf = vsi->back;
5790 struct i40e_hw *hw = &pf->hw;
5791 struct i40e_vsi_context ctxt;
5795 /* Check if enabled_tc is same as existing or new TCs */
5796 if (vsi->tc_config.enabled_tc == enabled_tc &&
5797 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5800 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5801 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5802 if (enabled_tc & BIT(i))
5806 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5808 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5810 dev_info(&pf->pdev->dev,
5811 "Failed configuring TC map %d for VSI %d\n",
5812 enabled_tc, vsi->seid);
5813 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5816 dev_info(&pf->pdev->dev,
5817 "Failed querying vsi bw info, err %pe aq_err %s\n",
5819 i40e_aq_str(hw, hw->aq.asq_last_status));
5822 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5823 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5826 valid_tc = bw_config.tc_valid_bits;
5827 /* Always enable TC0, no matter what */
5829 dev_info(&pf->pdev->dev,
5830 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5831 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5832 enabled_tc = valid_tc;
5835 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5837 dev_err(&pf->pdev->dev,
5838 "Unable to configure TC map %d for VSI %d\n",
5839 enabled_tc, vsi->seid);
5844 /* Update Queue Pairs Mapping for currently enabled UPs */
5845 ctxt.seid = vsi->seid;
5846 ctxt.pf_num = vsi->back->hw.pf_id;
5848 ctxt.uplink_seid = vsi->uplink_seid;
5849 ctxt.info = vsi->info;
5850 if (i40e_is_tc_mqprio_enabled(pf)) {
5851 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5855 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
/* On destroying the qdisc, reset vsi->rss_size, as the number of
* enabled queues may have changed.
5861 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5862 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5863 vsi->num_queue_pairs);
5864 ret = i40e_vsi_config_rss(vsi);
5866 dev_info(&vsi->back->pdev->dev,
5867 "Failed to reconfig rss for num_queues\n");
5870 vsi->reconfig_rss = false;
5872 if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) {
5873 ctxt.info.valid_sections |=
5874 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5875 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
/* Update the VSI after updating the VSI queue-mapping
* information.
5881 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5883 dev_info(&pf->pdev->dev,
5884 "Update vsi tc config failed, err %pe aq_err %s\n",
5886 i40e_aq_str(hw, hw->aq.asq_last_status));
5889 /* update the local VSI info with updated queue map */
5890 i40e_vsi_update_queue_map(vsi, &ctxt);
5891 vsi->info.valid_sections = 0;
5893 /* Update current VSI BW information */
5894 ret = i40e_vsi_get_bw_info(vsi);
5896 dev_info(&pf->pdev->dev,
5897 "Failed updating vsi bw info, err %pe aq_err %s\n",
5899 i40e_aq_str(hw, hw->aq.asq_last_status));
5903 /* Update the netdev TC setup */
5904 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5910 * i40e_get_link_speed - Returns link speed for the interface
5911 * @vsi: VSI to be configured
5914 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5916 struct i40e_pf *pf = vsi->back;
5918 switch (pf->hw.phy.link_info.link_speed) {
5919 case I40E_LINK_SPEED_40GB:
5921 case I40E_LINK_SPEED_25GB:
5923 case I40E_LINK_SPEED_20GB:
5925 case I40E_LINK_SPEED_10GB:
5927 case I40E_LINK_SPEED_1GB:
5935 * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
5936 * @vsi: Pointer to vsi structure
5937 * @max_tx_rate: max TX rate in bytes to be converted into Mbits
* Helper function to convert units before sending to set the BW limit
5941 static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
5943 if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
5944 dev_warn(&vsi->back->pdev->dev,
5945 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5946 max_tx_rate = I40E_BW_CREDIT_DIVISOR;
5948 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
5955 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5956 * @vsi: VSI to be configured
5957 * @seid: seid of the channel/VSI
5958 * @max_tx_rate: max TX rate to be configured as BW limit
5960 * Helper function to set BW limit for a given VSI
5962 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5964 struct i40e_pf *pf = vsi->back;
5969 speed = i40e_get_link_speed(vsi);
5970 if (max_tx_rate > speed) {
5971 dev_err(&pf->pdev->dev,
5972 "Invalid max tx rate %llu specified for VSI seid %d.",
5976 if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
5977 dev_warn(&pf->pdev->dev,
5978 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5979 max_tx_rate = I40E_BW_CREDIT_DIVISOR;
5982 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
5983 credits = max_tx_rate;
5984 do_div(credits, I40E_BW_CREDIT_DIVISOR);
5985 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5986 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5988 dev_err(&pf->pdev->dev,
5989 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n",
5990 max_tx_rate, seid, ERR_PTR(ret),
5991 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5996 * i40e_remove_queue_channels - Remove queue channels for the TCs
5997 * @vsi: VSI to be configured
5999 * Remove queue channels for the TCs
6001 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
6003 enum i40e_admin_queue_err last_aq_status;
6004 struct i40e_cloud_filter *cfilter;
6005 struct i40e_channel *ch, *ch_tmp;
6006 struct i40e_pf *pf = vsi->back;
6007 struct hlist_node *node;
6010 /* Reset rss size that was stored when reconfiguring rss for
6011 * channel VSIs with non-power-of-2 queue count.
6013 vsi->current_rss_size = 0;
6015 /* perform cleanup for channels if they exist */
6016 if (list_empty(&vsi->ch_list))
6019 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
6020 struct i40e_vsi *p_vsi;
6022 list_del(&ch->list);
6023 p_vsi = ch->parent_vsi;
6024 if (!p_vsi || !ch->initialized) {
6028 /* Reset queue contexts */
6029 for (i = 0; i < ch->num_queue_pairs; i++) {
6030 struct i40e_ring *tx_ring, *rx_ring;
6033 pf_q = ch->base_queue + i;
6034 tx_ring = vsi->tx_rings[pf_q];
6037 rx_ring = vsi->rx_rings[pf_q];
6041 /* Reset BW configured for this VSI via mqprio */
6042 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
6044 dev_info(&vsi->back->pdev->dev,
6045 "Failed to reset tx rate for ch->seid %u\n",
6048 /* delete cloud filters associated with this channel */
6049 hlist_for_each_entry_safe(cfilter, node,
6050 &pf->cloud_filter_list, cloud_node) {
6051 if (cfilter->seid != ch->seid)
6054 hash_del(&cfilter->cloud_node);
6055 if (cfilter->dst_port)
6056 ret = i40e_add_del_cloud_filter_big_buf(vsi,
6060 ret = i40e_add_del_cloud_filter(vsi, cfilter,
6062 last_aq_status = pf->hw.aq.asq_last_status;
6064 dev_info(&pf->pdev->dev,
6065 "Failed to delete cloud filter, err %pe aq_err %s\n",
6067 i40e_aq_str(&pf->hw, last_aq_status));
6071 /* delete VSI from FW */
6072 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
6075 dev_err(&vsi->back->pdev->dev,
6076 "unable to remove channel (%d) for parent VSI(%d)\n",
6077 ch->seid, p_vsi->seid);
6080 INIT_LIST_HEAD(&vsi->ch_list);
6084 * i40e_get_max_queues_for_channel
6085 * @vsi: ptr to VSI to which channels are associated with
* Helper function that returns the max value among the queue counts set on the
6088 * channels/TCs created.
6090 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
6092 struct i40e_channel *ch, *ch_tmp;
6095 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
6096 if (!ch->initialized)
6098 if (ch->num_queue_pairs > max)
6099 max = ch->num_queue_pairs;
6106 * i40e_validate_num_queues - validate num_queues w.r.t channel
6107 * @pf: ptr to PF device
6108 * @num_queues: number of queues
6109 * @vsi: the parent VSI
6110 * @reconfig_rss: indicates should the RSS be reconfigured or not
* This function validates the number of queues in the context of the new
* channel being established and determines whether RSS should be
* reconfigured for the parent VSI.
6116 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
6117 struct i40e_vsi *vsi, bool *reconfig_rss)
6124 *reconfig_rss = false;
6125 if (vsi->current_rss_size) {
6126 if (num_queues > vsi->current_rss_size) {
6127 dev_dbg(&pf->pdev->dev,
6128 "Error: num_queues (%d) > vsi's current_size(%d)\n",
6129 num_queues, vsi->current_rss_size);
6131 } else if ((num_queues < vsi->current_rss_size) &&
6132 (!is_power_of_2(num_queues))) {
6133 dev_dbg(&pf->pdev->dev,
6134 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
6135 num_queues, vsi->current_rss_size);
6140 if (!is_power_of_2(num_queues)) {
/* If channels already exist, find the max num_queues configured
* across them and enforce that 'num_queues' is not smaller than
* that maximum.
6146 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
6147 if (num_queues < max_ch_queues) {
6148 dev_dbg(&pf->pdev->dev,
6149 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
6150 num_queues, max_ch_queues);
6153 *reconfig_rss = true;
6160 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
6161 * @vsi: the VSI being setup
6162 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
6164 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
6166 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
6168 struct i40e_pf *pf = vsi->back;
6169 u8 seed[I40E_HKEY_ARRAY_SIZE];
6170 struct i40e_hw *hw = &pf->hw;
6178 if (rss_size > vsi->rss_size)
6181 local_rss_size = min_t(int, vsi->rss_size, rss_size);
6182 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
6186 /* Ignoring user configured lut if there is one */
6187 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
/* Use user configured hash key if there is one, otherwise
* fall back to the kernel's default RSS key.
6192 if (vsi->rss_hkey_user)
6193 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
6195 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
6197 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
6199 dev_info(&pf->pdev->dev,
6200 "Cannot set RSS lut, err %pe aq_err %s\n",
6202 i40e_aq_str(hw, hw->aq.asq_last_status));
6208 /* Do the update w.r.t. storing rss_size */
6209 if (!vsi->orig_rss_size)
6210 vsi->orig_rss_size = vsi->rss_size;
6211 vsi->current_rss_size = local_rss_size;
6217 * i40e_channel_setup_queue_map - Setup a channel queue map
6218 * @pf: ptr to PF device
6219 * @ctxt: VSI context structure
6220 * @ch: ptr to channel structure
6222 * Setup queue map for a specific channel
6224 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
6225 struct i40e_vsi_context *ctxt,
6226 struct i40e_channel *ch)
6228 u16 qcount, qmap, sections = 0;
6232 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
6233 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
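/* limit the channel's queue count to the LAN MSI-X vectors available */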
6235 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
6236 ch->num_queue_pairs = qcount;
6238 /* find the next higher power-of-2 of num queue pairs */
6239 pow = ilog2(qcount);
6240 if (!is_power_of_2(qcount))
6243 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
6244 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
6246 /* Setup queue TC[0].qmap for given VSI context */
6247 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
6249 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
6250 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
6251 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
6252 ctxt->info.valid_sections |= cpu_to_le16(sections);
6256 * i40e_add_channel - add a channel by adding VSI
6257 * @pf: ptr to PF device
6258 * @uplink_seid: underlying HW switching element (VEB) ID
6259 * @ch: ptr to channel structure
6261 * Add a channel (VSI) using add_vsi and queue_map
6263 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
6264 struct i40e_channel *ch)
6266 struct i40e_hw *hw = &pf->hw;
6267 struct i40e_vsi_context ctxt;
6268 u8 enabled_tc = 0x1; /* TC0 enabled */
6271 if (ch->type != I40E_VSI_VMDQ2) {
6272 dev_info(&pf->pdev->dev,
6273 "add new vsi failed, ch->type %d\n", ch->type);
6277 memset(&ctxt, 0, sizeof(ctxt));
6278 ctxt.pf_num = hw->pf_id;
6280 ctxt.uplink_seid = uplink_seid;
6281 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
6282 if (ch->type == I40E_VSI_VMDQ2)
6283 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6285 if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
6286 ctxt.info.valid_sections |=
6287 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6288 ctxt.info.switch_id =
6289 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6292 /* Set queue map for a given VSI context */
6293 i40e_channel_setup_queue_map(pf, &ctxt, ch);
6295 /* Now time to create VSI */
6296 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6298 dev_info(&pf->pdev->dev,
6299 "add new vsi failed, err %pe aq_err %s\n",
6301 i40e_aq_str(&pf->hw,
6302 pf->hw.aq.asq_last_status));
/* Success, update channel, set enabled_tc only if the channel
* is not a macvlan.
6309 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
6310 ch->seid = ctxt.seid;
6311 ch->vsi_number = ctxt.vsi_number;
6312 ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
6314 /* copy just the sections touched, not the entire info, since not all
6315 * sections are valid as returned by the add VSI command
 */
6318 ch->info.mapping_flags = ctxt.info.mapping_flags;
6319 memcpy(&ch->info.queue_mapping,
6320 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
6321 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
6322 sizeof(ctxt.info.tc_mapping));
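/**
 * i40e_channel_config_bw - configure BW for the channel VSI
 * @vsi: the VSI the channel is associated with
 * @ch: the channel being configured
 * @bw_share: per traffic class BW share credits
 *
 * Configure per-TC bandwidth shares for the channel's VSI via the
 * "configure VSI TC BW" admin queue command.
 */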
6327 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
6330 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
6334 memset(&bw_data, 0, sizeof(bw_data));
6335 bw_data.tc_valid_bits = ch->enabled_tc;
6336 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6337 bw_data.tc_bw_credits[i] = bw_share[i];
6339 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
6342 dev_info(&vsi->back->pdev->dev,
6343 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
6344 vsi->back->hw.aq.asq_last_status, ch->seid);
6348 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
6349 ch->info.qs_handle[i] = bw_data.qs_handles[i];
6355 * i40e_channel_config_tx_ring - config TX ring associated with new channel
6356 * @pf: ptr to PF device
6357 * @vsi: the VSI being setup
6358 * @ch: ptr to channel structure
6360 * Configure the TX rings associated with the channel (VSI), since the
 * queues are being moved from the main VSI.
6363 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
6364 struct i40e_vsi *vsi,
6365 struct i40e_channel *ch)
6367 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
6371 /* Enable ETS TCs with equal BW Share for now across all VSIs */
6372 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6373 if (ch->enabled_tc & BIT(i))
6377 /* configure BW for new VSI */
6378 ret = i40e_channel_config_bw(vsi, ch, bw_share);
6380 dev_info(&vsi->back->pdev->dev,
6381 "Failed configuring TC map %d for channel (seid %u)\n",
6382 ch->enabled_tc, ch->seid);
6386 for (i = 0; i < ch->num_queue_pairs; i++) {
6387 struct i40e_ring *tx_ring, *rx_ring;
6390 pf_q = ch->base_queue + i;
6392 /* Get the TX ring ptr of the main VSI, to re-setup the TX queue context */
6395 tx_ring = vsi->tx_rings[pf_q];
6398 /* Get the RX ring ptr */
6399 rx_ring = vsi->rx_rings[pf_q];
6407 * i40e_setup_hw_channel - setup new channel
6408 * @pf: ptr to PF device
6409 * @vsi: the VSI being setup
6410 * @ch: ptr to channel structure
6411 * @uplink_seid: underlying HW switching element (VEB) ID
6412 * @type: type of channel to be created (VMDq2/VF)
6414 * Setup new channel (VSI) based on specified type (VMDq2/VF)
6415 * and configures TX rings accordingly
6417 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
6418 struct i40e_vsi *vsi,
6419 struct i40e_channel *ch,
6420 u16 uplink_seid, u8 type)
6424 ch->initialized = false;
6425 ch->base_queue = vsi->next_base_queue;
6428 /* Proceed with creation of channel (VMDq2) VSI */
6429 ret = i40e_add_channel(pf, uplink_seid, ch);
6431 dev_info(&pf->pdev->dev,
6432 "failed to add_channel using uplink_seid %u\n",
6437 /* Mark the successful creation of channel */
6438 ch->initialized = true;
6440 /* Reconfigure TX queues using QTX_CTL register */
6441 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6443 dev_info(&pf->pdev->dev,
6444 "failed to configure TX rings for channel %u\n",
6449 /* update 'next_base_queue' */
6450 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6451 dev_dbg(&pf->pdev->dev,
6452 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6453 ch->seid, ch->vsi_number, ch->stat_counter_idx,
6454 ch->num_queue_pairs,
6455 vsi->next_base_queue);
6460 * i40e_setup_channel - setup new channel using uplink element
6461 * @pf: ptr to PF device
6462 * @vsi: pointer to the VSI to set up the channel within
6463 * @ch: ptr to channel structure
6465 * Setup new channel (VSI) based on specified type (VMDq2/VF)
6466 * and uplink switching element (uplink_seid)
6468 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6469 struct i40e_channel *ch)
6475 if (vsi->type == I40E_VSI_MAIN) {
6476 vsi_type = I40E_VSI_VMDQ2;
6478 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6483 /* underlying switching element */
6484 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6486 /* create channel (VSI), configure TX rings */
6487 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6489 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6493 return ch->initialized;
6497 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6498 * @vsi: ptr to VSI which has PF backing
6500 * Sets up the switch mode correctly if it needs to be changed, and verifies
6501 * that only allowed switch modes are configured.
6503 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6506 struct i40e_pf *pf = vsi->back;
6507 struct i40e_hw *hw = &pf->hw;
6510 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6514 if (hw->dev_caps.switch_mode) {
6515 /* if switch mode is set, support mode2 (non-tunneled for
6516 * cloud filter) for now
 */
6518 u32 switch_mode = hw->dev_caps.switch_mode &
6519 I40E_SWITCH_MODE_MASK;
6520 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6521 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6523 dev_err(&pf->pdev->dev,
6524 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6525 hw->dev_caps.switch_mode);
6530 /* Set Bit 7 to be valid */
6531 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6533 /* Set L4type for TCP support */
6534 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6536 /* Set cloud filter mode */
6537 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6539 /* Prep mode field for set_switch_config */
6540 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6541 pf->last_sw_conf_valid_flags,
6543 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6544 dev_err(&pf->pdev->dev,
6545 "couldn't set switch config bits, err %pe aq_err %s\n",
6548 hw->aq.asq_last_status));
6554 * i40e_create_queue_channel - function to create channel
6555 * @vsi: VSI to be configured
6556 * @ch: ptr to channel (it contains channel specific params)
6558 * This function creates a channel (VSI) using the num_queues specified by
6559 * the user, and reconfigures RSS if needed.
6561 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6562 struct i40e_channel *ch)
6564 struct i40e_pf *pf = vsi->back;
6571 if (!ch->num_queue_pairs) {
6572 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6573 ch->num_queue_pairs);
6577 /* validate user requested num_queues for channel */
6578 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6581 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6582 ch->num_queue_pairs);
6586 /* By default we are in VEPA mode, if this is the first VF/VMDq
6587 * VSI to be added, switch to VEB mode.
 */
6590 if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
6591 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
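/* Moving from VEPA to VEB changes the switch configuration, so a PF
 * reset is needed to rebuild the switch with the new mode before the
 * channel VSI is added.
 */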
6593 if (vsi->type == I40E_VSI_MAIN) {
6594 if (i40e_is_tc_mqprio_enabled(pf))
6595 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
6597 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
6599 /* now onwards for main VSI, number of queues will be value
6600 * of TC0's queue count
 */
6604 /* By this time, vsi->cnt_q_avail should be non-zero and at least
6605 * num_queues.
 */
6607 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6608 dev_dbg(&pf->pdev->dev,
6609 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6610 vsi->cnt_q_avail, ch->num_queue_pairs);
6614 /* reconfig_rss only if vsi type is MAIN_VSI */
6615 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6616 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6618 dev_info(&pf->pdev->dev,
6619 "Error: unable to reconfig rss for num_queues (%u)\n",
6620 ch->num_queue_pairs);
6625 if (!i40e_setup_channel(pf, vsi, ch)) {
6626 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6630 dev_info(&pf->pdev->dev,
6631 "Setup channel (id:%u) utilizing num_queues %d\n",
6632 ch->seid, ch->num_queue_pairs);
6634 /* configure VSI for BW limit */
6635 if (ch->max_tx_rate) {
6636 u64 credits = ch->max_tx_rate;
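/* i40e_set_bw_limit() takes the rate in Mbps; the scheduler accounts
 * in 50 Mbps credits (I40E_BW_CREDIT_DIVISOR), which is what the
 * debug message below reports.
 */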
6638 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6641 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6642 dev_dbg(&pf->pdev->dev,
6643 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6649 /* in case of VF, this will be main SRIOV VSI */
6650 ch->parent_vsi = vsi;
6652 /* and update main_vsi's count for queue_available to use */
6653 vsi->cnt_q_avail -= ch->num_queue_pairs;
6659 * i40e_configure_queue_channels - Add queue channel for the given TCs
6660 * @vsi: VSI to be configured
6662 * Configures queue channel mapping to the given TCs
6664 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6666 struct i40e_channel *ch;
6670 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6671 vsi->tc_seid_map[0] = vsi->seid;
6672 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6673 if (vsi->tc_config.enabled_tc & BIT(i)) {
6674 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6680 INIT_LIST_HEAD(&ch->list);
6681 ch->num_queue_pairs =
6682 vsi->tc_config.tc_info[i].qcount;
6684 vsi->tc_config.tc_info[i].qoffset;
6686 /* The bandwidth limit from the tc interface is in bytes/s; convert to Mbps */
6689 max_rate = vsi->mqprio_qopt.max_rate[i];
6690 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6691 ch->max_tx_rate = max_rate;
6693 list_add_tail(&ch->list, &vsi->ch_list);
6695 ret = i40e_create_queue_channel(vsi, ch);
6697 dev_err(&vsi->back->pdev->dev,
6698 "Failed creating queue channel with TC%d: queues %d\n",
6699 i, ch->num_queue_pairs);
6702 vsi->tc_seid_map[i] = ch->seid;
6706 /* reset to reconfigure TX queue contexts */
6707 i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true);
6711 i40e_remove_queue_channels(vsi);
6716 * i40e_veb_config_tc - Configure TCs for given VEB
* @veb: pointer to the VEB being configured
6718 * @enabled_tc: TC bitmap
6720 * Configures given TC bitmap for VEB (switching) element
6722 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6724 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6725 struct i40e_pf *pf = veb->pf;
6729 /* No TCs or already enabled TCs just return */
6730 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6733 bw_data.tc_valid_bits = enabled_tc;
6734 /* bw_data.absolute_credits is not set (relative) */
6736 /* Enable ETS TCs with equal BW Share for now */
6737 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6738 if (enabled_tc & BIT(i))
6739 bw_data.tc_bw_share_credits[i] = 1;
6742 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6745 dev_info(&pf->pdev->dev,
6746 "VEB bw config failed, err %pe aq_err %s\n",
6748 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6752 /* Update the BW information */
6753 ret = i40e_veb_get_bw_info(veb);
6755 dev_info(&pf->pdev->dev,
6756 "Failed getting veb bw config, err %pe aq_err %s\n",
6758 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6765 #ifdef CONFIG_I40E_DCB
6767 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6770 * Reconfigure VEB/VSIs on a given PF; it is assumed that the caller
6771 * has already quiesced all the VSIs before calling this function.
6774 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6776 struct i40e_vsi *vsi;
6777 struct i40e_veb *veb;
6782 /* Enable the TCs available on PF to all VEBs */
6783 tc_map = i40e_pf_get_tc_map(pf);
6784 if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
6787 i40e_pf_for_each_veb(pf, v, veb) {
6788 ret = i40e_veb_config_tc(veb, tc_map);
6790 dev_info(&pf->pdev->dev,
6791 "Failed configuring TC for VEB seid=%d\n",
6793 /* Continue and try to configure the remaining components */
6797 /* Update each VSI */
6798 i40e_pf_for_each_vsi(pf, v, vsi) {
6799 /* - Enable all TCs for the LAN VSI
6800 * - For all others keep them at TC0 for now
 */
6802 if (v == pf->lan_vsi)
6803 tc_map = i40e_pf_get_tc_map(pf);
6805 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6807 ret = i40e_vsi_config_tc(vsi, tc_map);
6809 dev_info(&pf->pdev->dev,
6810 "Failed configuring TC for VSI seid=%d\n",
6812 /* Continue and try to configure the remaining components */
6814 /* Re-configure VSI vectors based on updated TC map */
6815 i40e_vsi_map_rings_to_vectors(vsi);
6817 i40e_dcbnl_set_all(vsi);
6823 * i40e_resume_port_tx - Resume port Tx
6826 * Resume a port's Tx and issue a PF reset in case of failure to resume it.
6829 static int i40e_resume_port_tx(struct i40e_pf *pf)
6831 struct i40e_hw *hw = &pf->hw;
6834 ret = i40e_aq_resume_port_tx(hw, NULL);
6836 dev_info(&pf->pdev->dev,
6837 "Resume Port Tx failed, err %pe aq_err %s\n",
6839 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6840 /* Schedule PF reset to recover */
6841 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6842 i40e_service_event_schedule(pf);
6849 * i40e_suspend_port_tx - Suspend port Tx
6852 * Suspend a port's Tx and issue a PF reset in case of failure.
6854 static int i40e_suspend_port_tx(struct i40e_pf *pf)
6856 struct i40e_hw *hw = &pf->hw;
6859 ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
6861 dev_info(&pf->pdev->dev,
6862 "Suspend Port Tx failed, err %pe aq_err %s\n",
6864 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6865 /* Schedule PF reset to recover */
6866 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6867 i40e_service_event_schedule(pf);
6874 * i40e_hw_set_dcb_config - Program new DCBX settings into HW
6875 * @pf: PF being configured
6876 * @new_cfg: New DCBX configuration
6878 * Program DCB settings into HW and reconfigure VEB/VSIs on
6879 * given PF. Uses "Set LLDP MIB" AQC to program the hardware.
6881 static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
6882 struct i40e_dcbx_config *new_cfg)
6884 struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config;
6887 /* Check whether the configuration actually changed; compare the configs, not the pointers */
6888 if (!memcmp(new_cfg, old_cfg, sizeof(*new_cfg))) {
6889 dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n");
6893 /* Config change disable all VSIs */
6894 i40e_pf_quiesce_all_vsi(pf);
6896 /* Copy the new config to the current config */
6897 *old_cfg = *new_cfg;
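/* Mirror the operational ETS parameters into the ETS recommendation
 * (etsrec) before writing the MIB to the firmware.
 */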
6898 old_cfg->etsrec = old_cfg->etscfg;
6899 ret = i40e_set_dcb_config(&pf->hw);
6901 dev_info(&pf->pdev->dev,
6902 "Set DCB Config failed, err %pe aq_err %s\n",
6904 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6908 /* Changes in configuration update VEB/VSI */
6909 i40e_dcb_reconfigure(pf);
6911 /* In case of reset do not try to resume anything */
6912 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
6913 /* Re-start the VSIs if disabled */
6914 ret = i40e_resume_port_tx(pf);
6915 /* In case of error no point in resuming VSIs */
6918 i40e_pf_unquiesce_all_vsi(pf);
6925 * i40e_hw_dcb_config - Program new DCBX settings into HW
6926 * @pf: PF being configured
6927 * @new_cfg: New DCBX configuration
6929 * Program DCB settings into HW and reconfigure VEB/VSIs on the given PF.
6932 int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
6934 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
6935 u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0};
6936 u32 mfs_tc[I40E_MAX_TRAFFIC_CLASS];
6937 struct i40e_dcbx_config *old_cfg;
6938 u8 mode[I40E_MAX_TRAFFIC_CLASS];
6939 struct i40e_rx_pb_config pb_cfg;
6940 struct i40e_hw *hw = &pf->hw;
6941 u8 num_ports = hw->num_ports;
6949 dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n");
6950 /* Un-pack information to Program ETS HW via shared API
6953 * ETS/NON-ETS arbiter mode
6954 * max exponent (credit refills)
6955 * Total number of ports
6956 * PFC priority bit-map
6959 * Arbiter mode between UPs sharing same TC
6960 * TSA table (ETS or non-ETS)
6961 * EEE enabled or not
 */
6965 new_numtc = i40e_dcb_get_num_tc(new_cfg);
6967 memset(&ets_data, 0, sizeof(ets_data));
6968 for (i = 0; i < new_numtc; i++) {
6970 switch (new_cfg->etscfg.tsatable[i]) {
6971 case I40E_IEEE_TSA_ETS:
6972 prio_type[i] = I40E_DCB_PRIO_TYPE_ETS;
6973 ets_data.tc_bw_share_credits[i] =
6974 new_cfg->etscfg.tcbwtable[i];
6976 case I40E_IEEE_TSA_STRICT:
6977 prio_type[i] = I40E_DCB_PRIO_TYPE_STRICT;
6979 ets_data.tc_bw_share_credits[i] =
6980 I40E_DCB_STRICT_PRIO_CREDITS;
6983 /* Invalid TSA type */
6984 need_reconfig = false;
6989 old_cfg = &hw->local_dcbx_config;
6990 /* Check if need reconfiguration */
6991 need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg);
6993 /* If needed, enable/disable frame tagging, disable all VSIs
6994 * and suspend port tx
 */
6996 if (need_reconfig) {
6997 /* Enable DCB tagging only when more than one TC */
6999 set_bit(I40E_FLAG_DCB_ENA, pf->flags);
7001 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
7003 set_bit(__I40E_PORT_SUSPENDED, pf->state);
7004 /* Reconfiguration needed quiesce all VSIs */
7005 i40e_pf_quiesce_all_vsi(pf);
7006 ret = i40e_suspend_port_tx(pf);
7011 /* Configure Port ETS Tx Scheduler */
7012 ets_data.tc_valid_bits = tc_map;
7013 ets_data.tc_strict_priority_flags = lltc_map;
7014 ret = i40e_aq_config_switch_comp_ets
7015 (hw, pf->mac_seid, &ets_data,
7016 i40e_aqc_opc_modify_switching_comp_ets, NULL);
7018 dev_info(&pf->pdev->dev,
7019 "Modify Port ETS failed, err %pe aq_err %s\n",
7021 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7025 /* Configure Rx ETS HW */
7026 memset(&mode, I40E_DCB_ARB_MODE_ROUND_ROBIN, sizeof(mode));
7027 i40e_dcb_hw_set_num_tc(hw, new_numtc);
7028 i40e_dcb_hw_rx_fifo_config(hw, I40E_DCB_ARB_MODE_ROUND_ROBIN,
7029 I40E_DCB_ARB_MODE_STRICT_PRIORITY,
7030 I40E_DCB_DEFAULT_MAX_EXPONENT,
7032 i40e_dcb_hw_rx_cmd_monitor_config(hw, new_numtc, num_ports);
7033 i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode,
7035 i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable,
7036 new_cfg->etscfg.prioritytable);
7037 i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable);
7039 /* Configure Rx Packet Buffers in HW */
7040 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7041 mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
7042 mfs_tc[i] += I40E_PACKET_HDR_PAD;
7045 i40e_dcb_hw_calculate_pool_sizes(hw, num_ports,
7046 false, new_cfg->pfc.pfcenable,
7048 i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg);
7050 /* Update the local Rx Packet buffer config */
7051 pf->pb_cfg = pb_cfg;
7053 /* Inform the FW about changes to DCB configuration */
7054 ret = i40e_aq_dcb_updated(&pf->hw, NULL);
7056 dev_info(&pf->pdev->dev,
7057 "DCB Updated failed, err %pe aq_err %s\n",
7059 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7063 /* Update the port DCBx configuration */
7064 *old_cfg = *new_cfg;
7066 /* Changes in configuration update VEB/VSI */
7067 i40e_dcb_reconfigure(pf);
7069 /* Re-start the VSIs if disabled */
7070 if (need_reconfig) {
7071 ret = i40e_resume_port_tx(pf);
7073 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
7074 /* In case of error no point in resuming VSIs */
7078 /* Wait for the PF's queues to be disabled */
7079 ret = i40e_pf_wait_queues_disabled(pf);
7081 /* Schedule PF reset to recover */
7082 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
7083 i40e_service_event_schedule(pf);
7086 i40e_pf_unquiesce_all_vsi(pf);
7087 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
7088 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
7090 /* registers are set, let's apply */
7091 if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps))
7092 ret = i40e_hw_set_dcb_config(pf, new_cfg);
7100 * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW
7101 * @pf: PF being queried
7103 * Set default DCB configuration in case DCB is to be done in SW.
7105 int i40e_dcb_sw_default_config(struct i40e_pf *pf)
7107 struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config;
7108 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
7109 struct i40e_hw *hw = &pf->hw;
7112 if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps)) {
7113 /* Update the local cached instance with TC0 ETS */
7114 memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
7115 pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
7116 pf->tmp_cfg.etscfg.maxtcs = 0;
7117 pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
7118 pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
7119 pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING;
7120 pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
7121 /* FW needs one App to configure HW */
7122 pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS;
7123 pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE;
7124 pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO;
7125 pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE;
7127 return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg);
7130 memset(&ets_data, 0, sizeof(ets_data));
7131 ets_data.tc_valid_bits = I40E_DEFAULT_TRAFFIC_CLASS; /* TC0 only */
7132 ets_data.tc_strict_priority_flags = 0; /* ETS */
7133 ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW; /* 100% to TC0 */
7135 /* Enable ETS on the Physical port */
7136 err = i40e_aq_config_switch_comp_ets
7137 (hw, pf->mac_seid, &ets_data,
7138 i40e_aqc_opc_enable_switching_comp_ets, NULL);
7140 dev_info(&pf->pdev->dev,
7141 "Enable Port ETS failed, err %pe aq_err %s\n",
7143 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7148 /* Update the local cached instance with TC0 ETS */
7149 dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
7150 dcb_cfg->etscfg.cbs = 0;
7151 dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
7152 dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
7159 * i40e_init_pf_dcb - Initialize DCB configuration
7160 * @pf: PF being configured
7162 * Query the current DCB configuration and cache it
7163 * in the hardware structure
7165 static int i40e_init_pf_dcb(struct i40e_pf *pf)
7167 struct i40e_hw *hw = &pf->hw;
7170 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable.
7171 * Also do not enable DCBx if the FW LLDP agent is disabled.
 */
7173 if (test_bit(I40E_HW_CAP_NO_DCB_SUPPORT, pf->hw.caps)) {
7174 dev_info(&pf->pdev->dev, "DCB is not supported.\n");
7178 if (test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) {
7179 dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
7180 err = i40e_dcb_sw_default_config(pf);
7182 dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n");
7185 dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n");
7186 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
7187 DCB_CAP_DCBX_VER_IEEE;
7188 /* at init capable but disabled */
7189 set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
7190 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
7193 err = i40e_init_dcb(hw, true);
7195 /* Device/Function is not DCBX capable */
7196 if ((!hw->func_caps.dcb) ||
7197 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
7198 dev_info(&pf->pdev->dev,
7199 "DCBX offload is not supported or is disabled for this PF.\n");
7201 /* When status is not DISABLED then DCBX is running in FW */
7202 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
7203 DCB_CAP_DCBX_VER_IEEE;
7205 set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
7206 /* Enable DCB tagging only when more than one TC
7207 * or explicitly disable if only one TC
 */
7209 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
7210 set_bit(I40E_FLAG_DCB_ENA, pf->flags);
7212 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
7213 dev_dbg(&pf->pdev->dev,
7214 "DCBX offload is supported for this PF.\n");
7216 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
7217 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
7218 set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags);
7220 dev_info(&pf->pdev->dev,
7221 "Query for DCB configuration failed, err %pe aq_err %s\n",
7223 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7229 #endif /* CONFIG_I40E_DCB */
7232 * i40e_print_link_message - print link up or down
7233 * @vsi: the VSI for which link needs a message
7234 * @isup: true if link is up, false otherwise
7236 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
7238 enum i40e_aq_link_speed new_speed;
7239 struct i40e_pf *pf = vsi->back;
7240 char *speed = "Unknown";
7241 char *fc = "Unknown";
7247 new_speed = pf->hw.phy.link_info.link_speed;
7249 new_speed = I40E_LINK_SPEED_UNKNOWN;
7251 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
7253 vsi->current_isup = isup;
7254 vsi->current_speed = new_speed;
7256 netdev_info(vsi->netdev, "NIC Link is Down\n");
7260 /* Warn the user if link speed on an NPAR enabled partition is not at
 * least 10Gbps
 */
7263 if (pf->hw.func_caps.npar_enable &&
7264 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
7265 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
7266 netdev_warn(vsi->netdev,
7267 "The partition detected link speed that is less than 10Gbps\n");
7269 switch (pf->hw.phy.link_info.link_speed) {
7270 case I40E_LINK_SPEED_40GB:
7273 case I40E_LINK_SPEED_20GB:
7276 case I40E_LINK_SPEED_25GB:
7279 case I40E_LINK_SPEED_10GB:
7282 case I40E_LINK_SPEED_5GB:
7285 case I40E_LINK_SPEED_2_5GB:
7288 case I40E_LINK_SPEED_1GB:
7291 case I40E_LINK_SPEED_100MB:
7298 switch (pf->hw.fc.current_mode) {
7302 case I40E_FC_TX_PAUSE:
7305 case I40E_FC_RX_PAUSE:
7313 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
7318 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7321 if (pf->hw.phy.link_info.fec_info &
7322 I40E_AQ_CONFIG_FEC_KR_ENA)
7323 fec = "CL74 FC-FEC/BASE-R";
7324 else if (pf->hw.phy.link_info.fec_info &
7325 I40E_AQ_CONFIG_FEC_RS_ENA)
7326 fec = "CL108 RS-FEC";
7328 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
7329 * both RS and FC are requested
 */
7331 if (vsi->back->hw.phy.link_info.req_fec_info &
7332 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
7333 if (vsi->back->hw.phy.link_info.req_fec_info &
7334 I40E_AQ_REQUEST_FEC_RS)
7335 req_fec = "CL108 RS-FEC";
7337 req_fec = "CL74 FC-FEC/BASE-R";
7339 netdev_info(vsi->netdev,
7340 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7341 speed, req_fec, fec, an, fc);
7342 } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
7347 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
7350 if (pf->hw.phy.link_info.fec_info &
7351 I40E_AQ_CONFIG_FEC_KR_ENA)
7352 fec = "CL74 FC-FEC/BASE-R";
7354 if (pf->hw.phy.link_info.req_fec_info &
7355 I40E_AQ_REQUEST_FEC_KR)
7356 req_fec = "CL74 FC-FEC/BASE-R";
7358 netdev_info(vsi->netdev,
7359 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
7360 speed, req_fec, fec, an, fc);
7362 netdev_info(vsi->netdev,
7363 "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
7370 * i40e_up_complete - Finish the last steps of bringing up a connection
7371 * @vsi: the VSI being configured
7373 static int i40e_up_complete(struct i40e_vsi *vsi)
7375 struct i40e_pf *pf = vsi->back;
7378 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
7379 i40e_vsi_configure_msix(vsi);
7381 i40e_configure_msi_and_legacy(vsi);
7384 err = i40e_vsi_start_rings(vsi);
7388 clear_bit(__I40E_VSI_DOWN, vsi->state);
7389 i40e_napi_enable_all(vsi);
7390 i40e_vsi_enable_irq(vsi);
7392 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
7394 i40e_print_link_message(vsi, true);
7395 netif_tx_start_all_queues(vsi->netdev);
7396 netif_carrier_on(vsi->netdev);
7399 /* replay FDIR SB filters */
7400 if (vsi->type == I40E_VSI_FDIR) {
7401 /* reset fd counters */
7404 i40e_fdir_filter_restore(vsi);
7407 /* On the next run of the service_task, notify any clients of the new
 * opened netdev
 */
7410 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
7411 i40e_service_event_schedule(pf);
7417 * i40e_vsi_reinit_locked - Reset the VSI
7418 * @vsi: the VSI being configured
7420 * Rebuild the ring structs after some configuration
7421 * has changed, e.g. MTU size.
7423 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
7425 struct i40e_pf *pf = vsi->back;
7427 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
7428 usleep_range(1000, 2000);
7432 clear_bit(__I40E_CONFIG_BUSY, pf->state);
7436 * i40e_force_link_state - Force the link status
7437 * @pf: board private structure
7438 * @is_up: whether the link state should be forced up or down
7440 static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
7442 struct i40e_aq_get_phy_abilities_resp abilities;
7443 struct i40e_aq_set_phy_config config = {0};
7444 bool non_zero_phy_type = is_up;
7445 struct i40e_hw *hw = &pf->hw;
7450 /* Card might've been put in an unstable state by other drivers
7451 * and applications, which causes incorrect speed values being
7452 * set on startup. In order to clear speed registers, we call
7453 * get_phy_capabilities twice, once to get initial state of
7454 * available speeds, and once to get current PHY config.
 */
7456 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
7459 dev_err(&pf->pdev->dev,
7460 "failed to get phy cap., ret = %pe last_status = %s\n",
7462 i40e_aq_str(hw, hw->aq.asq_last_status));
7465 speed = abilities.link_speed;
7467 /* Get the current phy config */
7468 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
7471 dev_err(&pf->pdev->dev,
7472 "failed to get phy cap., ret = %pe last_status = %s\n",
7474 i40e_aq_str(hw, hw->aq.asq_last_status));
7478 /* If link needs to go up, but was not forced to go down,
7479 * and its speed values are OK, no need for a flap
7480 * if non_zero_phy_type was set, still need to force up.
 */
7482 if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags))
7483 non_zero_phy_type = true;
7484 else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
7487 /* To force link we need to set bits for all supported PHY types,
7488 * but there are now more than 32, so we need to split the bitmap
7489 * across two fields.
 */
7491 mask = I40E_PHY_TYPES_BITMASK;
7493 non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
7494 config.phy_type_ext =
7495 non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
7496 /* Copy the old settings, except for phy_type */
7497 config.abilities = abilities.abilities;
7498 if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) {
7500 config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
7502 config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
7504 if (abilities.link_speed != 0)
7505 config.link_speed = abilities.link_speed;
7507 config.link_speed = speed;
7508 config.eee_capability = abilities.eee_capability;
7509 config.eeer = abilities.eeer_val;
7510 config.low_power_ctrl = abilities.d3_lpan;
7511 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
7512 I40E_AQ_PHY_FEC_CONFIG_MASK;
7513 err = i40e_aq_set_phy_config(hw, &config, NULL);
7516 dev_err(&pf->pdev->dev,
7517 "set phy config ret = %pe last_status = %s\n",
7519 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7523 /* Update the link info */
7524 err = i40e_update_link_info(hw);
7526 /* Wait a little bit (on 40G cards it sometimes takes a really
7527 * long time for link to come back from the atomic reset).
 */
7531 i40e_update_link_info(hw);
7534 i40e_aq_set_link_restart_an(hw, is_up, NULL);
7540 * i40e_up - Bring the connection back up after being down
7541 * @vsi: the VSI being configured
7543 int i40e_up(struct i40e_vsi *vsi)
7547 if (vsi->type == I40E_VSI_MAIN &&
7548 (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) ||
7549 test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags)))
7550 i40e_force_link_state(vsi->back, true);
7552 err = i40e_vsi_configure(vsi);
7554 err = i40e_up_complete(vsi);
7560 * i40e_down - Shutdown the connection processing
7561 * @vsi: the VSI being stopped
7563 void i40e_down(struct i40e_vsi *vsi)
7567 /* It is assumed that the caller of this function
7568 * sets the vsi->state __I40E_VSI_DOWN bit.
 */
7571 netif_carrier_off(vsi->netdev);
7572 netif_tx_disable(vsi->netdev);
7574 i40e_vsi_disable_irq(vsi);
7575 i40e_vsi_stop_rings(vsi);
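/* If link-down-on-close or total-port-shutdown is enabled, also force
 * the PHY link down; i40e_up() forces it back up again.
 */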
7576 if (vsi->type == I40E_VSI_MAIN &&
7577 (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) ||
7578 test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags)))
7579 i40e_force_link_state(vsi->back, false);
7580 i40e_napi_disable_all(vsi);
7582 for (i = 0; i < vsi->num_queue_pairs; i++) {
7583 i40e_clean_tx_ring(vsi->tx_rings[i]);
7584 if (i40e_enabled_xdp_vsi(vsi)) {
7585 /* Make sure that in-progress ndo_xdp_xmit and
7586 * ndo_xsk_wakeup calls are completed.
 */
7589 i40e_clean_tx_ring(vsi->xdp_rings[i]);
7591 i40e_clean_rx_ring(vsi->rx_rings[i]);
7597 * i40e_validate_mqprio_qopt - validate queue mapping info
7598 * @vsi: the VSI being configured
7599 * @mqprio_qopt: queue parameters
7601 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
7602 struct tc_mqprio_qopt_offload *mqprio_qopt)
7604 u64 sum_max_rate = 0;
7608 if (mqprio_qopt->qopt.offset[0] != 0 ||
7609 mqprio_qopt->qopt.num_tc < 1 ||
7610 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
7612 for (i = 0; ; i++) {
7613 if (!mqprio_qopt->qopt.count[i])
7615 if (mqprio_qopt->min_rate[i]) {
7616 dev_err(&vsi->back->pdev->dev,
7617 "Invalid min tx rate (greater than 0) specified\n");
7620 max_rate = mqprio_qopt->max_rate[i];
7621 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
7622 sum_max_rate += max_rate;
7624 if (i >= mqprio_qopt->qopt.num_tc - 1)
7626 if (mqprio_qopt->qopt.offset[i + 1] !=
7627 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7630 if (vsi->num_queue_pairs <
7631 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
7632 dev_err(&vsi->back->pdev->dev,
7633 "Failed to create traffic channel, insufficient number of queues.\n");
7636 if (sum_max_rate > i40e_get_link_speed(vsi)) {
7637 dev_err(&vsi->back->pdev->dev,
7638 "Invalid max tx rate specified\n");
7645 * i40e_vsi_set_default_tc_config - set default values for tc configuration
7646 * @vsi: the VSI being configured
7648 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
7653 /* Only TC0 is enabled */
7654 vsi->tc_config.numtc = 1;
7655 vsi->tc_config.enabled_tc = 1;
7656 qcount = min_t(int, vsi->alloc_queue_pairs,
7657 i40e_pf_get_max_q_per_tc(vsi->back));
7658 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7659 /* For the TC that is not enabled set the offset to default
7660 * queue and allocate one queue for the given TC.
 */
7662 vsi->tc_config.tc_info[i].qoffset = 0;
7664 vsi->tc_config.tc_info[i].qcount = qcount;
7666 vsi->tc_config.tc_info[i].qcount = 1;
7667 vsi->tc_config.tc_info[i].netdev_tc = 0;
7672 * i40e_del_macvlan_filter
7673 * @hw: pointer to the HW structure
7674 * @seid: seid of the channel VSI
7675 * @macaddr: the mac address to apply as a filter
7676 * @aq_err: store the admin Q error
7678 * This function deletes a mac filter on the channel VSI which serves as the
7679 * macvlan. Returns 0 on success.
7681 static int i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
7682 const u8 *macaddr, int *aq_err)
7684 struct i40e_aqc_remove_macvlan_element_data element;
7687 memset(&element, 0, sizeof(element));
7688 ether_addr_copy(element.mac_addr, macaddr);
7689 element.vlan_tag = 0;
7690 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7691 status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
7692 *aq_err = hw->aq.asq_last_status;
7698 * i40e_add_macvlan_filter
7699 * @hw: pointer to the HW structure
7700 * @seid: seid of the channel VSI
7701 * @macaddr: the mac address to apply as a filter
7702 * @aq_err: store the admin Q error
7704 * This function adds a mac filter on the channel VSI which serves as the
7705 * macvlan. Returns 0 on success.
7707 static int i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
7708 const u8 *macaddr, int *aq_err)
7710 struct i40e_aqc_add_macvlan_element_data element;
7714 ether_addr_copy(element.mac_addr, macaddr);
7715 element.vlan_tag = 0;
7716 element.queue_number = 0;
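/* Request a perfect-match filter; I40E_AQC_MM_ERR_NO_RES asks the
 * firmware to report an error if no filter resources are available.
 */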
7717 element.match_method = I40E_AQC_MM_ERR_NO_RES;
7718 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
7719 element.flags = cpu_to_le16(cmd_flags);
7720 status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
7721 *aq_err = hw->aq.asq_last_status;
7727 * i40e_reset_ch_rings - Reset the queue contexts in a channel
7728 * @vsi: the VSI we want to access
7729 * @ch: the channel we want to access
7731 static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
7733 struct i40e_ring *tx_ring, *rx_ring;
7737 for (i = 0; i < ch->num_queue_pairs; i++) {
7738 pf_q = ch->base_queue + i;
7739 tx_ring = vsi->tx_rings[pf_q];
7741 rx_ring = vsi->rx_rings[pf_q];
7747 * i40e_free_macvlan_channels
7748 * @vsi: the VSI we want to access
7750 * This function frees the Qs of the channel VSI from
7751 * the stack and also deletes the channel VSIs which
7752 * serve as macvlans.
7754 static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
7756 struct i40e_channel *ch, *ch_tmp;
7759 if (list_empty(&vsi->macvlan_list))
7762 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7763 struct i40e_vsi *parent_vsi;
7765 if (i40e_is_channel_macvlan(ch)) {
7766 i40e_reset_ch_rings(vsi, ch);
7767 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7768 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
7769 netdev_set_sb_channel(ch->fwd->netdev, 0);
7774 list_del(&ch->list);
7775 parent_vsi = ch->parent_vsi;
7776 if (!parent_vsi || !ch->initialized) {
7781 /* remove the VSI */
7782 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7785 dev_err(&vsi->back->pdev->dev,
7786 "unable to remove channel (%d) for parent VSI(%d)\n",
7787 ch->seid, parent_vsi->seid);
7790 vsi->macvlan_cnt = 0;
7794 * i40e_fwd_ring_up - bring the macvlan device up
7795 * @vsi: the VSI we want to access
7796 * @vdev: macvlan netdevice
7797 * @fwd: the private fwd structure
7799 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7800 struct i40e_fwd_adapter *fwd)
7802 struct i40e_channel *ch = NULL, *ch_tmp, *iter;
7803 int ret = 0, num_tc = 1, i, aq_err;
7804 struct i40e_pf *pf = vsi->back;
7805 struct i40e_hw *hw = &pf->hw;
7807 /* Go through the list and find an available channel */
7808 list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
7809 if (!i40e_is_channel_macvlan(iter)) {
7811 /* record configuration for macvlan interface in vdev */
7812 for (i = 0; i < num_tc; i++)
7813 netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7815 iter->num_queue_pairs,
7817 for (i = 0; i < iter->num_queue_pairs; i++) {
7818 struct i40e_ring *tx_ring, *rx_ring;
7821 pf_q = iter->base_queue + i;
7823 /* Get to TX ring ptr */
7824 tx_ring = vsi->tx_rings[pf_q];
7827 /* Get the RX ring ptr */
7828 rx_ring = vsi->rx_rings[pf_q];
7839 /* Guarantee all rings are updated before we update the
7840 * MAC address filter.
 */
7844 /* Add a mac filter */
7845 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7847 /* if we cannot add the MAC rule then disable the offload */
7848 macvlan_release_l2fw_offload(vdev);
7849 for (i = 0; i < ch->num_queue_pairs; i++) {
7850 struct i40e_ring *rx_ring;
7853 pf_q = ch->base_queue + i;
7854 rx_ring = vsi->rx_rings[pf_q];
7855 rx_ring->netdev = NULL;
7857 dev_info(&pf->pdev->dev,
7858 "Error adding mac filter on macvlan err %pe, aq_err %s\n",
7860 i40e_aq_str(hw, aq_err));
7861 netdev_err(vdev, "L2fwd offload disabled due to L2 filter error\n");
7868 * i40e_setup_macvlans - create the channels which will be macvlans
7869 * @vsi: the VSI we want to access
7870 * @macvlan_cnt: no. of macvlans to be setup
7871 * @qcnt: no. of Qs per macvlan
7872 * @vdev: macvlan netdevice
7874 static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7875 struct net_device *vdev)
7877 struct i40e_pf *pf = vsi->back;
7878 struct i40e_hw *hw = &pf->hw;
7879 struct i40e_vsi_context ctxt;
7880 u16 sections, qmap, num_qps;
7881 struct i40e_channel *ch;
7882 int i, pow, ret = 0;
7885 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7888 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
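/* num_qps queues stay with the main VSI; the macvlan channels are
 * carved out of the queues above them (starting at next_base_queue).
 */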
7890 /* find the next higher power-of-2 of num queue pairs */
7891 pow = fls(roundup_pow_of_two(num_qps) - 1);
7893 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7894 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
7896 /* Setup context bits for the main VSI */
7897 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7898 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7899 memset(&ctxt, 0, sizeof(ctxt));
7900 ctxt.seid = vsi->seid;
7901 ctxt.pf_num = vsi->back->hw.pf_id;
7903 ctxt.uplink_seid = vsi->uplink_seid;
7904 ctxt.info = vsi->info;
7905 ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7906 ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7907 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7908 ctxt.info.valid_sections |= cpu_to_le16(sections);
7910 /* Reconfigure RSS for main VSI with new max queue count */
7911 vsi->rss_size = max_t(u16, num_qps, qcnt);
7912 ret = i40e_vsi_config_rss(vsi);
7914 dev_info(&pf->pdev->dev,
7915 "Failed to reconfig RSS for num_queues (%u)\n",
7919 vsi->reconfig_rss = true;
7920 dev_dbg(&vsi->back->pdev->dev,
7921 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7922 vsi->next_base_queue = num_qps;
7923 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
7925 /* Update the VSI after updating the VSI queue-mapping
 * information
 */
7928 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7930 dev_info(&pf->pdev->dev,
7931 "Update vsi tc config failed, err %pe aq_err %s\n",
7933 i40e_aq_str(hw, hw->aq.asq_last_status));
7936 /* update the local VSI info with updated queue map */
7937 i40e_vsi_update_queue_map(vsi, &ctxt);
7938 vsi->info.valid_sections = 0;
7940 /* Create channels for macvlans */
7941 INIT_LIST_HEAD(&vsi->macvlan_list);
7942 for (i = 0; i < macvlan_cnt; i++) {
7943 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7948 INIT_LIST_HEAD(&ch->list);
7949 ch->num_queue_pairs = qcnt;
7950 if (!i40e_setup_channel(pf, vsi, ch)) {
7955 ch->parent_vsi = vsi;
7956 vsi->cnt_q_avail -= ch->num_queue_pairs;
7958 list_add_tail(&ch->list, &vsi->macvlan_list);
7964 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7965 i40e_free_macvlan_channels(vsi);
7971 * i40e_fwd_add - configure macvlans
7972 * @netdev: net device to configure
7973 * @vdev: macvlan netdevice
7975 static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7977 struct i40e_netdev_priv *np = netdev_priv(netdev);
7978 u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7979 struct i40e_vsi *vsi = np->vsi;
7980 struct i40e_pf *pf = vsi->back;
7981 struct i40e_fwd_adapter *fwd;
7982 int avail_macvlan, ret;
7984 if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
7985 netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7986 return ERR_PTR(-EINVAL);
7988 if (i40e_is_tc_mqprio_enabled(pf)) {
7989 netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7990 return ERR_PTR(-EINVAL);
7992 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7993 netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7994 return ERR_PTR(-EINVAL);
7997 /* The macvlan device has to be a single Q device so that the
7998 * tc_to_txq field can be reused to pick the tx queue.
 */
8000 if (netif_is_multiqueue(vdev))
8001 return ERR_PTR(-ERANGE);
8003 if (!vsi->macvlan_cnt) {
8004 /* reserve bit 0 for the pf device */
8005 set_bit(0, vsi->fwd_bitmask);
8007 /* Try to reserve as many queues as possible for macvlans. First
8008 * reserve 3/4th of max vectors, then half, then quarter and
8009 * calculate Qs per macvlan as you go.
 */
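/* Example (illustrative numbers, not from the code): with 48 LAN
 * MSI-X vectors the "vectors <= 64" branch below applies, giving
 * 2 queues per macvlan and (48 - 16) / 2 = 16 macvlan channels.
 */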
8011 vectors = pf->num_lan_msix;
8012 if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
8013 /* allocate 4 Qs per macvlan and 32 Qs to the PF*/
8015 macvlan_cnt = (vectors - 32) / 4;
8016 } else if (vectors <= 64 && vectors > 32) {
8017 /* allocate 2 Qs per macvlan and 16 Qs to the PF*/
8019 macvlan_cnt = (vectors - 16) / 2;
8020 } else if (vectors <= 32 && vectors > 16) {
8021 /* allocate 1 Q per macvlan and 16 Qs to the PF*/
8023 macvlan_cnt = vectors - 16;
8024 } else if (vectors <= 16 && vectors > 8) {
8025 /* allocate 1 Q per macvlan and 8 Qs to the PF */
8027 macvlan_cnt = vectors - 8;
8029 /* allocate 1 Q per macvlan and 1 Q to the PF */
8031 macvlan_cnt = vectors - 1;
8034 if (macvlan_cnt == 0)
8035 return ERR_PTR(-EBUSY);
8037 /* Quiesce VSI queues */
8038 i40e_quiesce_vsi(vsi);
8040 /* sets up the macvlans but does not "enable" them */
8041 ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
8044 return ERR_PTR(ret);
8047 i40e_unquiesce_vsi(vsi);
8049 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
8051 if (avail_macvlan >= I40E_MAX_MACVLANS)
8052 return ERR_PTR(-EBUSY);
8054 /* create the fwd struct */
8055 fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
8057 return ERR_PTR(-ENOMEM);
8059 set_bit(avail_macvlan, vsi->fwd_bitmask);
8060 fwd->bit_no = avail_macvlan;
8061 netdev_set_sb_channel(vdev, avail_macvlan);
8064 if (!netif_running(netdev))
8067 /* Set fwd ring up */
8068 ret = i40e_fwd_ring_up(vsi, vdev, fwd);
8070 /* unbind the queues and drop the subordinate channel config */
8071 netdev_unbind_sb_channel(netdev, vdev);
8072 netdev_set_sb_channel(vdev, 0);
8075 return ERR_PTR(-EINVAL);
8082 * i40e_del_all_macvlans - Delete all the mac filters on the channels
8083 * @vsi: the VSI we want to access
8085 static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
8087 struct i40e_channel *ch, *ch_tmp;
8088 struct i40e_pf *pf = vsi->back;
8089 struct i40e_hw *hw = &pf->hw;
8090 int aq_err, ret = 0;
8092 if (list_empty(&vsi->macvlan_list))
8095 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
8096 if (i40e_is_channel_macvlan(ch)) {
8097 ret = i40e_del_macvlan_filter(hw, ch->seid,
8098 i40e_channel_mac(ch),
8101 /* Reset queue contexts */
8102 i40e_reset_ch_rings(vsi, ch);
8103 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
8104 netdev_unbind_sb_channel(vsi->netdev,
8106 netdev_set_sb_channel(ch->fwd->netdev, 0);
8115 * i40e_fwd_del - delete macvlan interfaces
8116 * @netdev: net device to configure
8117 * @vdev: macvlan netdevice
8119 static void i40e_fwd_del(struct net_device *netdev, void *vdev)
8121 struct i40e_netdev_priv *np = netdev_priv(netdev);
8122 struct i40e_fwd_adapter *fwd = vdev;
8123 struct i40e_channel *ch, *ch_tmp;
8124 struct i40e_vsi *vsi = np->vsi;
8125 struct i40e_pf *pf = vsi->back;
8126 struct i40e_hw *hw = &pf->hw;
8127 int aq_err, ret = 0;
8129 /* Find the channel associated with the macvlan and del mac filter */
8130 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
8131 if (i40e_is_channel_macvlan(ch) &&
8132 ether_addr_equal(i40e_channel_mac(ch),
8133 fwd->netdev->dev_addr)) {
8134 ret = i40e_del_macvlan_filter(hw, ch->seid,
8135 i40e_channel_mac(ch),
8138 /* Reset queue contexts */
8139 i40e_reset_ch_rings(vsi, ch);
8140 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
8141 netdev_unbind_sb_channel(netdev, fwd->netdev);
8142 netdev_set_sb_channel(fwd->netdev, 0);
8146 dev_info(&pf->pdev->dev,
8147 "Error deleting mac filter on macvlan err %pe, aq_err %s\n",
8149 i40e_aq_str(hw, aq_err));
8157 * i40e_setup_tc - configure multiple traffic classes
8158 * @netdev: net device to configure
8159 * @type_data: tc offload data
8161 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
8163 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8164 struct i40e_netdev_priv *np = netdev_priv(netdev);
8165 struct i40e_vsi *vsi = np->vsi;
8166 struct i40e_pf *pf = vsi->back;
8167 u8 enabled_tc = 0, num_tc, hw;
8168 bool need_reset = false;
8169 int old_queue_pairs;
8174 old_queue_pairs = vsi->num_queue_pairs;
8175 num_tc = mqprio_qopt->qopt.num_tc;
8176 hw = mqprio_qopt->qopt.hw;
8177 mode = mqprio_qopt->mode;
8179 clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
8180 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8184 /* Check if MFP enabled */
8185 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
8187 "Configuring TC not supported in MFP mode\n");
8191 case TC_MQPRIO_MODE_DCB:
8192 clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
8194 /* Check if DCB enabled to continue */
8195 if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
8197 "DCB is not enabled for adapter\n");
8201 /* Check whether tc count is within enabled limit */
8202 if (num_tc > i40e_pf_get_num_tc(pf)) {
8204 "TC count greater than enabled on link for adapter\n");
8208 case TC_MQPRIO_MODE_CHANNEL:
8209 if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
8211 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
8214 if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
8216 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
8219 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
8220 sizeof(*mqprio_qopt));
8221 set_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
8222 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
8229 /* Generate TC map for number of tc requested */
8230 for (i = 0; i < num_tc; i++)
8231 enabled_tc |= BIT(i);
8233 /* Requesting same TC configuration as already enabled */
8234 if (enabled_tc == vsi->tc_config.enabled_tc &&
8235 mode != TC_MQPRIO_MODE_CHANNEL)
8238 /* Quiesce VSI queues */
8239 i40e_quiesce_vsi(vsi);
8241 if (!hw && !i40e_is_tc_mqprio_enabled(pf))
8242 i40e_remove_queue_channels(vsi);
8244 /* Configure VSI for enabled TCs */
8245 ret = i40e_vsi_config_tc(vsi, enabled_tc);
8247 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
8251 } else if (enabled_tc &&
8252 (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
8254 "Failed to create channel. Override queues (%u) not power of 2\n",
8255 vsi->tc_config.tc_info[0].qcount);
8261 dev_info(&vsi->back->pdev->dev,
8262 "Setup channel (id:%u) utilizing num_queues %d\n",
8263 vsi->seid, vsi->tc_config.tc_info[0].qcount);
8265 if (i40e_is_tc_mqprio_enabled(pf)) {
8266 if (vsi->mqprio_qopt.max_rate[0]) {
8267 u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
8268 vsi->mqprio_qopt.max_rate[0]);
8270 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
8272 u64 credits = max_tx_rate;
8274 do_div(credits, I40E_BW_CREDIT_DIVISOR);
8275 dev_dbg(&vsi->back->pdev->dev,
8276 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
8285 ret = i40e_configure_queue_channels(vsi);
8287 vsi->num_queue_pairs = old_queue_pairs;
8289 "Failed configuring queue channels\n");
8296 /* Reset the configuration data to defaults, only TC0 is enabled */
8298 i40e_vsi_set_default_tc_config(vsi);
8303 i40e_unquiesce_vsi(vsi);
8308 * i40e_set_cld_element - sets cloud filter element data
8309 * @filter: cloud filter rule
8310 * @cld: ptr to cloud filter element data
8312 * This is a helper function to copy data into the cloud filter element
8315 i40e_set_cld_element(struct i40e_cloud_filter *filter,
8316 struct i40e_aqc_cloud_filters_element_data *cld)
8321 memset(cld, 0, sizeof(*cld));
8322 ether_addr_copy(cld->outer_mac, filter->dst_mac);
8323 ether_addr_copy(cld->inner_mac, filter->src_mac);
8325 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
8328 if (filter->n_proto == ETH_P_IPV6) {
8329 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
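/* The destination IPv6 address is stored for the admin queue in
 * reversed 32-bit word order, converted to little endian.
 */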
8330 for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
8331 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
8333 *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
8336 ipa = be32_to_cpu(filter->dst_ipv4);
8338 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
8341 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
8343 /* tenant_id is not supported by FW now, once the support is enabled
8344 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
 */
8346 if (filter->tenant_id)
8351 * i40e_add_del_cloud_filter - Add/del cloud filter
8352 * @vsi: pointer to VSI
8353 * @filter: cloud filter rule
8354 * @add: if true, add, if false, delete
8356 * Add or delete a cloud filter for a specific flow spec.
8357 * Returns 0 if the filter was successfully added or deleted.
8359 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
8360 struct i40e_cloud_filter *filter, bool add)
8362 struct i40e_aqc_cloud_filters_element_data cld_filter;
8363 struct i40e_pf *pf = vsi->back;
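/* flag_table maps the driver's cloud filter flag combinations to the
 * corresponding admin queue filter-type flags.
 */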
8365 static const u16 flag_table[128] = {
8366 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
8367 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
8368 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
8369 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
8370 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
8371 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
8372 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
8373 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
8374 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
8375 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
8376 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
8377 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
8378 [I40E_CLOUD_FILTER_FLAGS_IIP] =
8379 I40E_AQC_ADD_CLOUD_FILTER_IIP,
8382 if (filter->flags >= ARRAY_SIZE(flag_table))
8385 memset(&cld_filter, 0, sizeof(cld_filter));
8387 /* copy element needed to add cloud filter from filter */
8388 i40e_set_cld_element(filter, &cld_filter);
8390 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
8391 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
8392 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
8394 if (filter->n_proto == ETH_P_IPV6)
8395 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8396 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8398 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
8399 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8402 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
8405 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
8408 dev_dbg(&pf->pdev->dev,
8409 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
8410 add ? "add" : "delete", filter->dst_port, ret,
8411 pf->hw.aq.asq_last_status);
8413 dev_info(&pf->pdev->dev,
8414 "%s cloud filter for VSI: %d\n",
8415 add ? "Added" : "Deleted", filter->seid);
8420 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
8421 * @vsi: pointer to VSI
8422 * @filter: cloud filter rule
8423 * @add: if true, add, if false, delete
8425 * Add or delete a cloud filter for a specific flow spec using big buffer.
8426 * Returns 0 if the filter was successfully added or deleted.
8428 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
8429 struct i40e_cloud_filter *filter,
8432 struct i40e_aqc_cloud_filters_element_bb cld_filter;
8433 struct i40e_pf *pf = vsi->back;
8436 /* Both (src/dst) valid mac_addr are not supported */
8437 if ((is_valid_ether_addr(filter->dst_mac) &&
8438 is_valid_ether_addr(filter->src_mac)) ||
8439 (is_multicast_ether_addr(filter->dst_mac) &&
8440 is_multicast_ether_addr(filter->src_mac)))
8443 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
8444 * ports are not supported via big buffer now.
 */
8446 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
8449 /* adding filter using src_port/src_ip is not supported at this stage */
8450 if (filter->src_port ||
8451 (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8452 !ipv6_addr_any(&filter->ip.v6.src_ip6))
8455 memset(&cld_filter, 0, sizeof(cld_filter));
8457 /* copy element needed to add cloud filter from filter */
8458 i40e_set_cld_element(filter, &cld_filter.element);
8460 if (is_valid_ether_addr(filter->dst_mac) ||
8461 is_valid_ether_addr(filter->src_mac) ||
8462 is_multicast_ether_addr(filter->dst_mac) ||
8463 is_multicast_ether_addr(filter->src_mac)) {
8464 /* MAC + IP : unsupported mode */
8465 if (filter->dst_ipv4)
8468 /* since we validated that L4 port must be valid before
8469 * we get here, start with respective "flags" value
8470 * and update it if a vlan is present
 */
8472 cld_filter.element.flags =
8473 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
8475 if (filter->vlan_id) {
8476 cld_filter.element.flags =
8477 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
8480 } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
8481 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
8482 cld_filter.element.flags =
8483 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
8484 if (filter->n_proto == ETH_P_IPV6)
8485 cld_filter.element.flags |=
8486 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
8488 cld_filter.element.flags |=
8489 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
8491 dev_err(&pf->pdev->dev,
8492 "either mac or ip has to be valid for cloud filter\n");
8496 /* Now copy L4 port in Byte 6..7 in general fields */
8497 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
8498 be16_to_cpu(filter->dst_port);
8501 /* Validate current device switch mode, change if necessary */
8502 ret = i40e_validate_and_set_switch_mode(vsi);
8504 dev_err(&pf->pdev->dev,
8505 "failed to set switch mode, ret %d\n",
8510 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
8513 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
8518 dev_dbg(&pf->pdev->dev,
8519 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
8520 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
8522 dev_info(&pf->pdev->dev,
8523 "%s cloud filter for VSI: %d, L4 port: %d\n",
8524 add ? "add" : "delete", filter->seid,
8525 ntohs(filter->dst_port));
8530 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
8531 * @vsi: Pointer to VSI
8532 * @f: Pointer to struct flow_cls_offload
8533 * @filter: Pointer to cloud filter structure
8536 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
8537 struct flow_cls_offload *f,
8538 struct i40e_cloud_filter *filter)
8540 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
8541 struct flow_dissector *dissector = rule->match.dissector;
8542 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
8543 struct i40e_pf *pf = vsi->back;
8546 if (dissector->used_keys &
8547 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
8548 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
8549 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
8550 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
8551 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
8552 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
8553 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
8554 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
8555 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%llx\n",
8556 dissector->used_keys);
8560 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
8561 struct flow_match_enc_keyid match;
8563 flow_rule_match_enc_keyid(rule, &match);
8564 if (match.mask->keyid != 0)
8565 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
8567 filter->tenant_id = be32_to_cpu(match.key->keyid);
8570 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
8571 struct flow_match_basic match;
8573 flow_rule_match_basic(rule, &match);
8574 n_proto_key = ntohs(match.key->n_proto);
8575 n_proto_mask = ntohs(match.mask->n_proto);
8577 if (n_proto_key == ETH_P_ALL) {
8578 n_proto_key = 0;
8579 n_proto_mask = 0;
8580 }
8581 filter->n_proto = n_proto_key & n_proto_mask;
8582 filter->ip_proto = match.key->ip_proto;
8585 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
8586 struct flow_match_eth_addrs match;
8588 flow_rule_match_eth_addrs(rule, &match);
8590 /* use is_broadcast and is_zero to check for all 0xf or 0 */
8591 if (!is_zero_ether_addr(match.mask->dst)) {
8592 if (is_broadcast_ether_addr(match.mask->dst)) {
8593 field_flags |= I40E_CLOUD_FIELD_OMAC;
8595 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
8601 if (!is_zero_ether_addr(match.mask->src)) {
8602 if (is_broadcast_ether_addr(match.mask->src)) {
8603 field_flags |= I40E_CLOUD_FIELD_IMAC;
8605 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
8610 ether_addr_copy(filter->dst_mac, match.key->dst);
8611 ether_addr_copy(filter->src_mac, match.key->src);
8614 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
8615 struct flow_match_vlan match;
8617 flow_rule_match_vlan(rule, &match);
8618 if (match.mask->vlan_id) {
8619 if (match.mask->vlan_id == VLAN_VID_MASK) {
8620 field_flags |= I40E_CLOUD_FIELD_IVLAN;
8623 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
8624 match.mask->vlan_id);
8629 filter->vlan_id = cpu_to_be16(match.key->vlan_id);
8632 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
8633 struct flow_match_control match;
8635 flow_rule_match_control(rule, &match);
8636 addr_type = match.key->addr_type;
8639 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
8640 struct flow_match_ipv4_addrs match;
8642 flow_rule_match_ipv4_addrs(rule, &match);
8643 if (match.mask->dst) {
8644 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
8645 field_flags |= I40E_CLOUD_FIELD_IIP;
8647 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
8653 if (match.mask->src) {
8654 if (match.mask->src == cpu_to_be32(0xffffffff)) {
8655 field_flags |= I40E_CLOUD_FIELD_IIP;
8657 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
8663 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
8664 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
8667 filter->dst_ipv4 = match.key->dst;
8668 filter->src_ipv4 = match.key->src;
8671 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
8672 struct flow_match_ipv6_addrs match;
8674 flow_rule_match_ipv6_addrs(rule, &match);
8676 /* src and dest IPV6 address should not be LOOPBACK
8677 * (0:0:0:0:0:0:0:1), which can be represented as ::1
8679 if (ipv6_addr_loopback(&match.key->dst) ||
8680 ipv6_addr_loopback(&match.key->src)) {
8681 dev_err(&pf->pdev->dev,
8682 "Bad ipv6, addr is LOOPBACK\n");
8685 if (!ipv6_addr_any(&match.mask->dst) ||
8686 !ipv6_addr_any(&match.mask->src))
8687 field_flags |= I40E_CLOUD_FIELD_IIP;
8689 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
8690 sizeof(filter->src_ipv6));
8691 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
8692 sizeof(filter->dst_ipv6));
8695 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
8696 struct flow_match_ports match;
8698 flow_rule_match_ports(rule, &match);
8699 if (match.mask->src) {
8700 if (match.mask->src == cpu_to_be16(0xffff)) {
8701 field_flags |= I40E_CLOUD_FIELD_IIP;
8703 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
8704 be16_to_cpu(match.mask->src));
8709 if (match.mask->dst) {
8710 if (match.mask->dst == cpu_to_be16(0xffff)) {
8711 field_flags |= I40E_CLOUD_FIELD_IIP;
8713 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
8714 be16_to_cpu(match.mask->dst));
8719 filter->dst_port = match.key->dst;
8720 filter->src_port = match.key->src;
8722 switch (filter->ip_proto) {
8723 case IPPROTO_TCP:
8724 case IPPROTO_UDP:
8725 break;
8726 default:
8727 dev_err(&pf->pdev->dev,
8728 "Only UDP and TCP transport are supported\n");
8729 return -EINVAL;
8730 }
8732 filter->flags = field_flags;
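/* Usage note (illustrative, not driver code): a rule this parser accepts
 * could be installed from user space roughly as follows; the interface
 * name, addresses and tc index are placeholders.
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 protocol ip ingress flower \
 *       ip_proto tcp dst_ip 192.168.1.10 dst_port 80 skip_sw hw_tc 1
 */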
8737 * i40e_handle_tclass - Forward to a traffic class on the device
8738 * @vsi: Pointer to VSI
8739 * @tc: traffic class index on the device
8740 * @filter: Pointer to cloud filter structure
8743 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
8744 struct i40e_cloud_filter *filter)
8746 struct i40e_channel *ch, *ch_tmp;
8748 /* direct to a traffic class on the same device */
8749 if (tc == 0) {
8750 filter->seid = vsi->seid;
8751 return 0;
8752 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
8753 if (!filter->dst_port) {
8754 dev_err(&vsi->back->pdev->dev,
8755 "Specify destination port to direct to traffic class that is not default\n");
8758 if (list_empty(&vsi->ch_list))
8759 return -EINVAL;
8760 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
8761 list) {
8762 if (ch->seid == vsi->tc_seid_map[tc])
8763 filter->seid = ch->seid;
8767 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
8772 * i40e_configure_clsflower - Configure tc flower filters
8773 * @vsi: Pointer to VSI
8774 * @cls_flower: Pointer to struct flow_cls_offload
8777 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
8778 struct flow_cls_offload *cls_flower)
8780 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
8781 struct i40e_cloud_filter *filter = NULL;
8782 struct i40e_pf *pf = vsi->back;
8786 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
8791 dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
8795 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
8796 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
8799 if (pf->fdir_pf_active_filters ||
8800 (!hlist_empty(&pf->fdir_filter_list))) {
8801 dev_err(&vsi->back->pdev->dev,
8802 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
8806 if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags)) {
8807 dev_err(&vsi->back->pdev->dev,
8808 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
8809 clear_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags);
8810 clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, vsi->back->flags);
8813 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
8817 filter->cookie = cls_flower->cookie;
8819 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
8823 err = i40e_handle_tclass(vsi, tc, filter);
8827 /* Add cloud filter */
8828 if (filter->dst_port)
8829 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
8831 err = i40e_add_del_cloud_filter(vsi, filter, true);
8834 dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
8839 /* add filter to the ordered list */
8840 INIT_HLIST_NODE(&filter->cloud_node);
8842 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
8844 pf->num_cloud_filters++;
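/* Illustrative setup (not driver code): redirecting to a non-default
 * traffic class assumes the classes/channels already exist, e.g. created
 * via mqprio channel mode.  Interface name and numbers are placeholders.
 *
 *   ethtool -K eth0 hw-tc-offload on
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 1 \
 *       queues 4@0 4@4 hw 1 mode channel
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 protocol ip ingress flower \
 *       ip_proto tcp dst_port 80 skip_sw hw_tc 1
 */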
8853 * i40e_find_cloud_filter - Find the cloud filter in the list
8854 * @vsi: Pointer to VSI
8855 * @cookie: filter specific cookie
8858 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8859 unsigned long *cookie)
8861 struct i40e_cloud_filter *filter = NULL;
8862 struct hlist_node *node2;
8864 hlist_for_each_entry_safe(filter, node2,
8865 &vsi->back->cloud_filter_list, cloud_node)
8866 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
8872 * i40e_delete_clsflower - Remove tc flower filters
8873 * @vsi: Pointer to VSI
8874 * @cls_flower: Pointer to struct flow_cls_offload
8877 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8878 struct flow_cls_offload *cls_flower)
8880 struct i40e_cloud_filter *filter = NULL;
8881 struct i40e_pf *pf = vsi->back;
8884 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
8889 hash_del(&filter->cloud_node);
8891 if (filter->dst_port)
8892 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
8894 err = i40e_add_del_cloud_filter(vsi, filter, false);
8898 dev_err(&pf->pdev->dev,
8899 "Failed to delete cloud filter, err %pe\n",
8901 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
8904 pf->num_cloud_filters--;
8905 if (!pf->num_cloud_filters)
8906 if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) &&
8907 !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) {
8908 set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
8909 clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags);
8910 clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
8916 * i40e_setup_tc_cls_flower - flower classifier offloads
8917 * @np: net device to configure
8918 * @cls_flower: offload data
8920 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8921 struct flow_cls_offload *cls_flower)
8923 struct i40e_vsi *vsi = np->vsi;
8925 switch (cls_flower->command) {
8926 case FLOW_CLS_REPLACE:
8927 return i40e_configure_clsflower(vsi, cls_flower);
8928 case FLOW_CLS_DESTROY:
8929 return i40e_delete_clsflower(vsi, cls_flower);
8930 case FLOW_CLS_STATS:
8937 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8940 struct i40e_netdev_priv *np = cb_priv;
8942 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8946 case TC_SETUP_CLSFLOWER:
8947 return i40e_setup_tc_cls_flower(np, type_data);
8954 static LIST_HEAD(i40e_block_cb_list);
8956 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8959 struct i40e_netdev_priv *np = netdev_priv(netdev);
8962 case TC_SETUP_QDISC_MQPRIO:
8963 return i40e_setup_tc(netdev, type_data);
8964 case TC_SETUP_BLOCK:
8965 return flow_block_cb_setup_simple(type_data,
8966 &i40e_block_cb_list,
8967 i40e_setup_tc_block_cb,
8975 * i40e_open - Called when a network interface is made active
8976 * @netdev: network interface device structure
8978 * The open entry point is called when a network interface is made
8979 * active by the system (IFF_UP). At this point all resources needed
8980 * for transmit and receive operations are allocated, the interrupt
8981 * handler is registered with the OS, the netdev watchdog subtask is
8982 * enabled, and the stack is notified that the interface is ready.
8984 * Returns 0 on success, negative value on failure
8986 int i40e_open(struct net_device *netdev)
8988 struct i40e_netdev_priv *np = netdev_priv(netdev);
8989 struct i40e_vsi *vsi = np->vsi;
8990 struct i40e_pf *pf = vsi->back;
8993 /* disallow open during test or if eeprom is broken */
8994 if (test_bit(__I40E_TESTING, pf->state) ||
8995 test_bit(__I40E_BAD_EEPROM, pf->state))
8998 netif_carrier_off(netdev);
9000 if (i40e_force_link_state(pf, true))
9003 err = i40e_vsi_open(vsi);
9007 /* configure global TSO hardware offload settings */
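/* Note: TCP_FLAG_* are big-endian header-word masks with the flag bits in
 * the upper 16 bits, so the be32_to_cpu()/>>16 below reduces them to the
 * 16-bit flag mask these TSOMSK registers appear to expect.
 */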
9008 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
9009 TCP_FLAG_FIN) >> 16);
9010 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
9011 TCP_FLAG_FIN |
9012 TCP_FLAG_CWR) >> 16);
9013 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
9014 udp_tunnel_get_rx_info(netdev);
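/* Usage note: this is wired up as .ndo_open in i40e_netdev_ops elsewhere in
 * this file, so it runs when the interface is brought up, e.g. with
 * "ip link set <iface> up" (interface name is a placeholder).
 */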
9020 * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
9021 * @vsi: vsi structure
9023 * This updates netdev's number of tx/rx queues
9025 * Returns status of setting tx/rx queues
9027 static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
9031 ret = netif_set_real_num_rx_queues(vsi->netdev,
9032 vsi->num_queue_pairs);
9036 return netif_set_real_num_tx_queues(vsi->netdev,
9037 vsi->num_queue_pairs);
9042 * @vsi: the VSI to open
9044 * Finish initialization of the VSI.
9046 * Returns 0 on success, negative value on failure
9048 * Note: expects to be called while under rtnl_lock()
9050 int i40e_vsi_open(struct i40e_vsi *vsi)
9052 struct i40e_pf *pf = vsi->back;
9053 char int_name[I40E_INT_NAME_STR_LEN];
9056 /* allocate descriptors */
9057 err = i40e_vsi_setup_tx_resources(vsi);
9060 err = i40e_vsi_setup_rx_resources(vsi);
9064 err = i40e_vsi_configure(vsi);
9069 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
9070 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
9071 err = i40e_vsi_request_irq(vsi, int_name);
9075 /* Notify the stack of the actual queue counts. */
9076 err = i40e_netif_set_realnum_tx_rx_queues(vsi);
9078 goto err_set_queues;
9080 } else if (vsi->type == I40E_VSI_FDIR) {
9081 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
9082 dev_driver_string(&pf->pdev->dev),
9083 dev_name(&pf->pdev->dev));
9084 err = i40e_vsi_request_irq(vsi, int_name);
9093 err = i40e_up_complete(vsi);
9095 goto err_up_complete;
9102 i40e_vsi_free_irq(vsi);
9104 i40e_vsi_free_rx_resources(vsi);
9106 i40e_vsi_free_tx_resources(vsi);
9107 if (vsi == pf->vsi[pf->lan_vsi])
9108 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
9114 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
9115 * @pf: Pointer to PF
9117 * This function destroys the hlist where all the Flow Director
9118 * filters were saved.
9120 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
9122 struct i40e_fdir_filter *filter;
9123 struct i40e_flex_pit *pit_entry, *tmp;
9124 struct hlist_node *node2;
9126 hlist_for_each_entry_safe(filter, node2,
9127 &pf->fdir_filter_list, fdir_node) {
9128 hlist_del(&filter->fdir_node);
9132 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
9133 list_del(&pit_entry->list);
9136 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
9138 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
9139 list_del(&pit_entry->list);
9142 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
9144 pf->fdir_pf_active_filters = 0;
9145 i40e_reset_fdir_filter_cnt(pf);
9147 /* Reprogram the default input set for TCP/IPv4 */
9148 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9149 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9150 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9152 /* Reprogram the default input set for TCP/IPv6 */
9153 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
9154 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9155 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9157 /* Reprogram the default input set for UDP/IPv4 */
9158 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
9159 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9160 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9162 /* Reprogram the default input set for UDP/IPv6 */
9163 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
9164 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9165 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9167 /* Reprogram the default input set for SCTP/IPv4 */
9168 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
9169 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9170 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9172 /* Reprogram the default input set for SCTP/IPv6 */
9173 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
9174 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9175 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9177 /* Reprogram the default input set for Other/IPv4 */
9178 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
9179 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9181 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
9182 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9184 /* Reprogram the default input set for Other/IPv6 */
9185 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
9186 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9188 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
9189 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
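/* Illustrative example (not driver code): the sideband filters being torn
 * down here are the ones added with ethtool ntuple rules, e.g. (interface
 * name and values are placeholders):
 *
 *   ethtool -N eth0 flow-type tcp4 src-ip 10.0.0.1 dst-ip 10.0.0.2 \
 *       src-port 1000 dst-port 2000 action 3
 *
 * The writes above restore the default input set (full L3 src/dst plus L4
 * src/dst match) that such rules may have narrowed.
 */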
9193 * i40e_cloud_filter_exit - Cleans up the cloud filters
9194 * @pf: Pointer to PF
9196 * This function destroys the hlist where all the cloud filters
9197 * were saved.
9199 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
9201 struct i40e_cloud_filter *cfilter;
9202 struct hlist_node *node;
9204 hlist_for_each_entry_safe(cfilter, node,
9205 &pf->cloud_filter_list, cloud_node) {
9206 hlist_del(&cfilter->cloud_node);
9209 pf->num_cloud_filters = 0;
9211 if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) &&
9212 !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) {
9213 set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
9214 clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags);
9215 clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
9220 * i40e_close - Disables a network interface
9221 * @netdev: network interface device structure
9223 * The close entry point is called when an interface is de-activated
9224 * by the OS. The hardware is still under the driver's control, but
9225 * this netdev interface is disabled.
9227 * Returns 0, this is not allowed to fail
9229 int i40e_close(struct net_device *netdev)
9231 struct i40e_netdev_priv *np = netdev_priv(netdev);
9232 struct i40e_vsi *vsi = np->vsi;
9234 i40e_vsi_close(vsi);
9240 * i40e_do_reset - Start a PF or Core Reset sequence
9241 * @pf: board private structure
9242 * @reset_flags: which reset is requested
9243 * @lock_acquired: indicates whether or not the lock has been acquired
9244 * before this function was called.
9246 * The essential difference in resets is that the PF Reset
9247 * doesn't clear the packet buffers, doesn't reset the PE
9248 * firmware, and doesn't bother the other PFs on the chip.
9250 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
9252 struct i40e_vsi *vsi;
9256 /* do the biggest reset indicated */
9257 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
9259 /* Request a Global Reset
9261 * This will start the chip's countdown to the actual full
9262 * chip reset event, and a warning interrupt to be sent
9263 * to all PFs, including the requestor. Our handler
9264 * for the warning interrupt will deal with the shutdown
9265 * and recovery of the switch setup.
9267 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
9268 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9269 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
9270 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9272 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
9274 /* Request a Core Reset
9276 * Same as Global Reset, except does *not* include the MAC/PHY
9278 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
9279 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9280 val |= I40E_GLGEN_RTRIG_CORER_MASK;
9281 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9282 i40e_flush(&pf->hw);
9284 } else if (reset_flags & I40E_PF_RESET_FLAG) {
9286 /* Request a PF Reset
9288 * Resets only the PF-specific registers
9290 * This goes directly to the tear-down and rebuild of
9291 * the switch, since we need to do all the recovery as
9292 * for the Core Reset.
9294 dev_dbg(&pf->pdev->dev, "PFR requested\n");
9295 i40e_handle_reset_warning(pf, lock_acquired);
9297 } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
9298 /* Request a PF Reset
9300 * Resets PF and reinitializes PFs VSI.
9302 i40e_prep_for_reset(pf);
9303 i40e_reset_and_rebuild(pf, true, lock_acquired);
9304 dev_info(&pf->pdev->dev,
9305 test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ?
9306 "FW LLDP is disabled\n" :
9307 "FW LLDP is enabled\n");
9309 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
9310 /* Find the VSI(s) that requested a re-init */
9311 dev_info(&pf->pdev->dev, "VSI reinit requested\n");
9313 i40e_pf_for_each_vsi(pf, i, vsi) {
9314 if (test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
9316 i40e_vsi_reinit_locked(vsi);
9318 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
9319 /* Find the VSI(s) that needs to be brought down */
9320 dev_info(&pf->pdev->dev, "VSI down requested\n");
9322 i40e_pf_for_each_vsi(pf, i, vsi) {
9323 if (test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
9325 set_bit(__I40E_VSI_DOWN, vsi->state);
9330 dev_info(&pf->pdev->dev,
9331 "bad reset request 0x%08x\n", reset_flags);
9335 #ifdef CONFIG_I40E_DCB
9337 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
9338 * @pf: board private structure
9339 * @old_cfg: current DCB config
9340 * @new_cfg: new DCB config
9342 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
9343 struct i40e_dcbx_config *old_cfg,
9344 struct i40e_dcbx_config *new_cfg)
9346 bool need_reconfig = false;
9348 /* Check if ETS configuration has changed */
9349 if (memcmp(&new_cfg->etscfg,
9351 sizeof(new_cfg->etscfg))) {
9352 /* If Priority Table has changed reconfig is needed */
9353 if (memcmp(&new_cfg->etscfg.prioritytable,
9354 &old_cfg->etscfg.prioritytable,
9355 sizeof(new_cfg->etscfg.prioritytable))) {
9356 need_reconfig = true;
9357 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
9360 if (memcmp(&new_cfg->etscfg.tcbwtable,
9361 &old_cfg->etscfg.tcbwtable,
9362 sizeof(new_cfg->etscfg.tcbwtable)))
9363 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
9365 if (memcmp(&new_cfg->etscfg.tsatable,
9366 &old_cfg->etscfg.tsatable,
9367 sizeof(new_cfg->etscfg.tsatable)))
9368 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
9371 /* Check if PFC configuration has changed */
9372 if (memcmp(&new_cfg->pfc,
9374 sizeof(new_cfg->pfc))) {
9375 need_reconfig = true;
9376 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
9379 /* Check if APP Table has changed */
9380 if (memcmp(&new_cfg->app,
9382 sizeof(new_cfg->app))) {
9383 need_reconfig = true;
9384 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
9387 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
9388 return need_reconfig;
9392 * i40e_handle_lldp_event - Handle LLDP Change MIB event
9393 * @pf: board private structure
9394 * @e: event info posted on ARQ
9396 static int i40e_handle_lldp_event(struct i40e_pf *pf,
9397 struct i40e_arq_event_info *e)
9399 struct i40e_aqc_lldp_get_mib *mib =
9400 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
9401 struct i40e_hw *hw = &pf->hw;
9402 struct i40e_dcbx_config tmp_dcbx_cfg;
9403 bool need_reconfig = false;
9407 /* X710-T*L 2.5G and 5G speeds don't support DCB */
9408 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9409 (hw->phy.link_info.link_speed &
9410 ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
9411 !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
9412 /* let firmware decide if the DCB should be disabled */
9413 set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
9415 /* Not DCB capable or capability disabled */
9416 if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
9419 /* Ignore if event is not for Nearest Bridge */
9420 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
9421 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
9422 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
9423 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
9426 /* Check MIB Type and return if event for Remote MIB update */
9427 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
9428 dev_dbg(&pf->pdev->dev,
9429 "LLDP event mib type %s\n", type ? "remote" : "local");
9430 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
9431 /* Update the remote cached instance and return */
9432 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
9433 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
9434 &hw->remote_dcbx_config);
9438 /* Store the old configuration */
9439 tmp_dcbx_cfg = hw->local_dcbx_config;
9441 /* Reset the old DCBx configuration data */
9442 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
9443 /* Get updated DCBX data from firmware */
9444 ret = i40e_get_dcb_config(&pf->hw);
9446 /* X710-T*L 2.5G and 5G speeds don't support DCB */
9447 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9448 (hw->phy.link_info.link_speed &
9449 (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
9450 dev_warn(&pf->pdev->dev,
9451 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
9452 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
9454 dev_info(&pf->pdev->dev,
9455 "Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
9457 i40e_aq_str(&pf->hw,
9458 pf->hw.aq.asq_last_status));
9463 /* No change detected in DCBX configs */
9464 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
9465 sizeof(tmp_dcbx_cfg))) {
9466 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
9470 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
9471 &hw->local_dcbx_config);
9473 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
9478 /* Enable DCB tagging only when more than one TC */
9479 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
9480 set_bit(I40E_FLAG_DCB_ENA, pf->flags);
9482 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
9484 set_bit(__I40E_PORT_SUSPENDED, pf->state);
9485 /* Reconfiguration needed quiesce all VSIs */
9486 i40e_pf_quiesce_all_vsi(pf);
9488 /* Changes in configuration update VEB/VSI */
9489 i40e_dcb_reconfigure(pf);
9491 ret = i40e_resume_port_tx(pf);
9493 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
9494 /* In case of error no point in resuming VSIs */
9498 /* Wait for the PF's queues to be disabled */
9499 ret = i40e_pf_wait_queues_disabled(pf);
9501 /* Schedule PF reset to recover */
9502 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9503 i40e_service_event_schedule(pf);
9505 i40e_pf_unquiesce_all_vsi(pf);
9506 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
9507 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
9513 #endif /* CONFIG_I40E_DCB */
9516 * i40e_do_reset_safe - Protected reset path for userland calls.
9517 * @pf: board private structure
9518 * @reset_flags: which reset is requested
9521 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
9524 i40e_do_reset(pf, reset_flags, true);
9529 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
9530 * @pf: board private structure
9531 * @e: event info posted on ARQ
9533 * Handler for LAN Queue Overflow Event generated by the firmware for PF
9536 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
9537 struct i40e_arq_event_info *e)
9539 struct i40e_aqc_lan_overflow *data =
9540 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
9541 u32 queue = le32_to_cpu(data->prtdcb_rupto);
9542 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
9543 struct i40e_hw *hw = &pf->hw;
9547 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
9550 if (FIELD_GET(I40E_QTX_CTL_PFVF_Q_MASK, qtx_ctl) !=
9551 I40E_QTX_CTL_VF_QUEUE)
9554 /* Queue belongs to VF, find the VF and issue VF reset */
9555 vf_id = FIELD_GET(I40E_QTX_CTL_VFVM_INDX_MASK, qtx_ctl);
9556 vf_id -= hw->func_caps.vf_base_id;
9557 vf = &pf->vf[vf_id];
9558 i40e_vc_notify_vf_reset(vf);
9559 /* Allow VF to process pending reset notification */
9561 i40e_reset_vf(vf, false);
9565 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
9566 * @pf: board private structure
9568 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
9572 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9573 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
9578 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
9579 * @pf: board private structure
9581 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
9585 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9586 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
9587 FIELD_GET(I40E_PFQF_FDSTAT_BEST_CNT_MASK, val);
9592 * i40e_get_global_fd_count - Get total FD filters programmed on device
9593 * @pf: board private structure
9595 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
9599 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
9600 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
9601 FIELD_GET(I40E_GLQF_FDCNT_0_BESTCNT_MASK, val);
9606 * i40e_reenable_fdir_sb - Restore FDir SB capability
9607 * @pf: board private structure
9609 static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
9611 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
9612 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
9613 (I40E_DEBUG_FD & pf->hw.debug_mask))
9614 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
9618 * i40e_reenable_fdir_atr - Restore FDir ATR capability
9619 * @pf: board private structure
9621 static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
9623 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
9624 /* ATR uses the same filtering logic as SB rules. It only
9625 * functions properly if the input set mask is at the default
9626 * settings. It is safe to restore the default input set
9627 * because there are no active TCPv4 filter rules.
9629 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9630 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9631 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9633 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
9634 (I40E_DEBUG_FD & pf->hw.debug_mask))
9635 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
9640 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
9641 * @pf: board private structure
9642 * @filter: FDir filter to remove
9644 static void i40e_delete_invalid_filter(struct i40e_pf *pf,
9645 struct i40e_fdir_filter *filter)
9647 /* Update counters */
9648 pf->fdir_pf_active_filters--;
9651 switch (filter->flow_type) {
9653 pf->fd_tcp4_filter_cnt--;
9656 pf->fd_udp4_filter_cnt--;
9659 pf->fd_sctp4_filter_cnt--;
9662 pf->fd_tcp6_filter_cnt--;
9665 pf->fd_udp6_filter_cnt--;
9668 pf->fd_sctp6_filter_cnt--;
9671 switch (filter->ipl4_proto) {
9673 pf->fd_tcp4_filter_cnt--;
9676 pf->fd_udp4_filter_cnt--;
9679 pf->fd_sctp4_filter_cnt--;
9682 pf->fd_ip4_filter_cnt--;
9686 case IPV6_USER_FLOW:
9687 switch (filter->ipl4_proto) {
9689 pf->fd_tcp6_filter_cnt--;
9692 pf->fd_udp6_filter_cnt--;
9695 pf->fd_sctp6_filter_cnt--;
9698 pf->fd_ip6_filter_cnt--;
9704 /* Remove the filter from the list and free memory */
9705 hlist_del(&filter->fdir_node);
9710 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
9711 * @pf: board private structure
9713 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
9715 struct i40e_fdir_filter *filter;
9716 u32 fcnt_prog, fcnt_avail;
9717 struct hlist_node *node;
9719 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9722 /* Check if we have enough room to re-enable FDir SB capability. */
9723 fcnt_prog = i40e_get_global_fd_count(pf);
9724 fcnt_avail = pf->fdir_pf_filter_count;
9725 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
9726 (pf->fd_add_err == 0) ||
9727 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
9728 i40e_reenable_fdir_sb(pf);
9730 /* We should wait for even more space before re-enabling ATR.
9731 * Additionally, we cannot enable ATR as long as we still have TCP SB
9734 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
9735 pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
9736 i40e_reenable_fdir_atr(pf);
9738 /* if hw had a problem adding a filter, delete it */
9739 if (pf->fd_inv > 0) {
9740 hlist_for_each_entry_safe(filter, node,
9741 &pf->fdir_filter_list, fdir_node)
9742 if (filter->fd_id == pf->fd_inv)
9743 i40e_delete_invalid_filter(pf, filter);
9747 #define I40E_MIN_FD_FLUSH_INTERVAL 10
9748 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
9750 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
9751 * @pf: board private structure
9753 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
9755 unsigned long min_flush_time;
9756 int flush_wait_retry = 50;
9757 bool disable_atr = false;
9761 if (!time_after(jiffies, pf->fd_flush_timestamp +
9762 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
9765 /* If the flush is happening too quick and we have mostly SB rules we
9766 * should not re-enable ATR for some time.
9768 min_flush_time = pf->fd_flush_timestamp +
9769 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
9770 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
9772 if (!(time_after(jiffies, min_flush_time)) &&
9773 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
9774 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9775 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
9779 pf->fd_flush_timestamp = jiffies;
9780 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9781 /* flush all filters */
9782 wr32(&pf->hw, I40E_PFQF_CTL_1,
9783 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
9784 i40e_flush(&pf->hw);
9788 /* Check FD flush status every 5-6msec */
9789 usleep_range(5000, 6000);
9790 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
9791 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
9793 } while (flush_wait_retry--);
9794 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
9795 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
9796 } else {
9797 /* replay sideband filters */
9798 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
9799 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
9800 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9801 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
9802 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9803 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
9808 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
9809 * @pf: board private structure
9811 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
9813 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
9817 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
9818 * @pf: board private structure
9820 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
9823 /* if interface is down do nothing */
9824 if (test_bit(__I40E_DOWN, pf->state))
9827 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9828 i40e_fdir_flush_and_replay(pf);
9830 i40e_fdir_check_and_reenable(pf);
9835 * i40e_vsi_link_event - notify VSI of a link event
9836 * @vsi: vsi to be notified
9837 * @link_up: link up or down
9839 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
9841 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
9844 switch (vsi->type) {
9846 if (!vsi->netdev || !vsi->netdev_registered)
9849 if (link_up) {
9850 netif_carrier_on(vsi->netdev);
9851 netif_tx_wake_all_queues(vsi->netdev);
9852 } else {
9853 netif_carrier_off(vsi->netdev);
9854 netif_tx_stop_all_queues(vsi->netdev);
9858 case I40E_VSI_SRIOV:
9859 case I40E_VSI_VMDQ2:
9861 case I40E_VSI_IWARP:
9862 case I40E_VSI_MIRROR:
9864 /* there is no notification for other VSIs */
9870 * i40e_veb_link_event - notify elements on the veb of a link event
9871 * @veb: veb to be notified
9872 * @link_up: link up or down
9874 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
9876 struct i40e_vsi *vsi;
9880 if (!veb || !veb->pf)
9884 /* Send link event to contained VSIs */
9885 i40e_pf_for_each_vsi(pf, i, vsi)
9886 if (vsi->uplink_seid == veb->seid)
9887 i40e_vsi_link_event(vsi, link_up);
9891 * i40e_link_event - Update netif_carrier status
9892 * @pf: board private structure
9894 static void i40e_link_event(struct i40e_pf *pf)
9896 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9897 u8 new_link_speed, old_link_speed;
9898 bool new_link, old_link;
9900 #ifdef CONFIG_I40E_DCB
9902 #endif /* CONFIG_I40E_DCB */
9904 /* set this to force the get_link_status call to refresh state */
9905 pf->hw.phy.get_link_info = true;
9906 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9907 status = i40e_get_link_status(&pf->hw, &new_link);
9909 /* On success, disable temp link polling */
9911 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9913 /* Enable link polling temporarily until i40e_get_link_status
9916 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9917 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
9922 old_link_speed = pf->hw.phy.link_info_old.link_speed;
9923 new_link_speed = pf->hw.phy.link_info.link_speed;
9925 if (new_link == old_link &&
9926 new_link_speed == old_link_speed &&
9927 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
9928 new_link == netif_carrier_ok(vsi->netdev)))
9931 i40e_print_link_message(vsi, new_link);
9933 /* Notify the base of the switch tree connected to
9934 * the link. Floating VEBs are not notified.
9936 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9937 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9939 i40e_vsi_link_event(vsi, new_link);
9942 i40e_vc_notify_link_state(pf);
9944 if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
9945 i40e_ptp_set_increment(pf);
9946 #ifdef CONFIG_I40E_DCB
9947 if (new_link == old_link)
9949 /* Not SW DCB so firmware will take care of default settings */
9950 if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
9953 /* We only handle link down here; after link up, in the SW DCB case the
9954 * SW LLDP agent will take care of setting it up.
9957 dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n");
9958 memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
9959 err = i40e_dcb_sw_default_config(pf);
9961 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
9962 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
9964 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
9965 DCB_CAP_DCBX_VER_IEEE;
9966 set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
9967 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
9970 #endif /* CONFIG_I40E_DCB */
9974 * i40e_watchdog_subtask - periodic checks not using event driven response
9975 * @pf: board private structure
9977 static void i40e_watchdog_subtask(struct i40e_pf *pf)
9979 struct i40e_vsi *vsi;
9980 struct i40e_veb *veb;
9983 /* if interface is down do nothing */
9984 if (test_bit(__I40E_DOWN, pf->state) ||
9985 test_bit(__I40E_CONFIG_BUSY, pf->state))
9988 /* make sure we don't do these things too often */
9989 if (time_before(jiffies, (pf->service_timer_previous +
9990 pf->service_timer_period)))
9992 pf->service_timer_previous = jiffies;
9994 if (test_bit(I40E_FLAG_LINK_POLLING_ENA, pf->flags) ||
9995 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
9996 i40e_link_event(pf);
9998 /* Update the stats for active netdevs so the network stack
9999 * can look at updated numbers whenever it cares to
10001 i40e_pf_for_each_vsi(pf, i, vsi)
10003 i40e_update_stats(vsi);
10005 if (test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)) {
10006 /* Update the stats for the active switching components */
10007 i40e_pf_for_each_veb(pf, i, veb)
10008 i40e_update_veb_stats(veb);
10011 i40e_ptp_rx_hang(pf);
10012 i40e_ptp_tx_hang(pf);
10016 * i40e_reset_subtask - Set up for resetting the device and driver
10017 * @pf: board private structure
10019 static void i40e_reset_subtask(struct i40e_pf *pf)
10021 u32 reset_flags = 0;
10023 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
10024 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
10025 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
10027 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
10028 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
10029 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
10031 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
10032 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
10033 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
10035 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
10036 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
10037 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
10039 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
10040 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
10041 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
10044 /* If there's a recovery already waiting, it takes
10045 * precedence over starting a new reset sequence.
10047 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
10048 i40e_prep_for_reset(pf);
10050 i40e_rebuild(pf, false, false);
10053 /* If we're already down or resetting, just bail */
10054 if (reset_flags &&
10055 !test_bit(__I40E_DOWN, pf->state) &&
10056 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
10057 i40e_do_reset(pf, reset_flags, false);
10062 * i40e_handle_link_event - Handle link event
10063 * @pf: board private structure
10064 * @e: event info posted on ARQ
10066 static void i40e_handle_link_event(struct i40e_pf *pf,
10067 struct i40e_arq_event_info *e)
10069 struct i40e_aqc_get_link_status *status =
10070 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
10072 /* Do a new status request to re-enable LSE reporting
10073 * and load new status information into the hw struct
10074 * This completely ignores any state information
10075 * in the ARQ event info, instead choosing to always
10076 * issue the AQ update link status command.
10078 i40e_link_event(pf);
10080 /* Check if module meets thermal requirements */
10081 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
10082 dev_err(&pf->pdev->dev,
10083 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
10084 dev_err(&pf->pdev->dev,
10085 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
10087 /* check for unqualified module, if link is down, suppress
10088 * the message if link was forced to be down.
10090 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
10091 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
10092 (!(status->link_info & I40E_AQ_LINK_UP)) &&
10093 (!test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))) {
10094 dev_err(&pf->pdev->dev,
10095 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
10096 dev_err(&pf->pdev->dev,
10097 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
10103 * i40e_clean_adminq_subtask - Clean the AdminQ rings
10104 * @pf: board private structure
10106 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
10108 struct i40e_arq_event_info event;
10109 struct i40e_hw *hw = &pf->hw;
10110 u16 pending, i = 0;
10116 /* Do not run clean AQ when PF reset fails */
10117 if (test_bit(__I40E_RESET_FAILED, pf->state))
10120 /* check for error indications */
10121 val = rd32(&pf->hw, I40E_PF_ARQLEN);
10123 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
10124 if (hw->debug_mask & I40E_DEBUG_AQ)
10125 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
10126 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
10128 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
10129 if (hw->debug_mask & I40E_DEBUG_AQ)
10130 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
10131 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
10132 pf->arq_overflows++;
10134 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
10135 if (hw->debug_mask & I40E_DEBUG_AQ)
10136 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
10137 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
10140 wr32(&pf->hw, I40E_PF_ARQLEN, val);
10142 val = rd32(&pf->hw, I40E_PF_ATQLEN);
10144 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
10145 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10146 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
10147 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
10149 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
10150 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10151 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
10152 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
10154 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
10155 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10156 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
10157 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
10160 wr32(&pf->hw, I40E_PF_ATQLEN, val);
10162 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
10163 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
10164 if (!event.msg_buf)
10168 ret = i40e_clean_arq_element(hw, &event, &pending);
10169 if (ret == -EALREADY)
10172 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
10176 opcode = le16_to_cpu(event.desc.opcode);
10179 case i40e_aqc_opc_get_link_status:
10181 i40e_handle_link_event(pf, &event);
10184 case i40e_aqc_opc_send_msg_to_pf:
10185 ret = i40e_vc_process_vf_msg(pf,
10186 le16_to_cpu(event.desc.retval),
10187 le32_to_cpu(event.desc.cookie_high),
10188 le32_to_cpu(event.desc.cookie_low),
10192 case i40e_aqc_opc_lldp_update_mib:
10193 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
10194 #ifdef CONFIG_I40E_DCB
10196 i40e_handle_lldp_event(pf, &event);
10198 #endif /* CONFIG_I40E_DCB */
10200 case i40e_aqc_opc_event_lan_overflow:
10201 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
10202 i40e_handle_lan_overflow_event(pf, &event);
10204 case i40e_aqc_opc_send_msg_to_peer:
10205 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
10207 case i40e_aqc_opc_nvm_erase:
10208 case i40e_aqc_opc_nvm_update:
10209 case i40e_aqc_opc_oem_post_update:
10210 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
10211 "ARQ NVM operation 0x%04x completed\n",
10215 dev_info(&pf->pdev->dev,
10216 "ARQ: Unknown event 0x%04x ignored\n",
10220 } while (i++ < I40E_AQ_WORK_LIMIT);
10222 if (i < I40E_AQ_WORK_LIMIT)
10223 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
10225 /* re-enable Admin queue interrupt cause */
10226 val = rd32(hw, I40E_PFINT_ICR0_ENA);
10227 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
10228 wr32(hw, I40E_PFINT_ICR0_ENA, val);
10231 kfree(event.msg_buf);
10235 * i40e_verify_eeprom - make sure eeprom is good to use
10236 * @pf: board private structure
10238 static void i40e_verify_eeprom(struct i40e_pf *pf)
10242 err = i40e_diag_eeprom_test(&pf->hw);
10244 /* retry in case of garbage read */
10245 err = i40e_diag_eeprom_test(&pf->hw);
10247 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
10249 set_bit(__I40E_BAD_EEPROM, pf->state);
10253 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
10254 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
10255 clear_bit(__I40E_BAD_EEPROM, pf->state);
10260 * i40e_enable_pf_switch_lb
10261 * @pf: pointer to the PF structure
10263 * enable switch loop back or die - no point in a return value
10265 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
10267 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10268 struct i40e_vsi_context ctxt;
10271 ctxt.seid = pf->main_vsi_seid;
10272 ctxt.pf_num = pf->hw.pf_id;
10274 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10276 dev_info(&pf->pdev->dev,
10277 "couldn't get PF vsi config, err %pe aq_err %s\n",
10279 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10282 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10283 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10284 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10286 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10288 dev_info(&pf->pdev->dev,
10289 "update vsi switch failed, err %pe aq_err %s\n",
10291 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10296 * i40e_disable_pf_switch_lb
10297 * @pf: pointer to the PF structure
10299 * disable switch loop back or die - no point in a return value
10301 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
10303 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10304 struct i40e_vsi_context ctxt;
10307 ctxt.seid = pf->main_vsi_seid;
10308 ctxt.pf_num = pf->hw.pf_id;
10310 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10312 dev_info(&pf->pdev->dev,
10313 "couldn't get PF vsi config, err %pe aq_err %s\n",
10315 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10318 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10319 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10320 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10322 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10324 dev_info(&pf->pdev->dev,
10325 "update vsi switch failed, err %pe aq_err %s\n",
10327 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10332 * i40e_config_bridge_mode - Configure the HW bridge mode
10333 * @veb: pointer to the bridge instance
10335 * Configure the loop back mode for the LAN VSI that is downlink to the
10336 * specified HW bridge instance. It is expected this function is called
10337 * when a new HW bridge is instantiated.
10339 static void i40e_config_bridge_mode(struct i40e_veb *veb)
10341 struct i40e_pf *pf = veb->pf;
10343 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
10344 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
10345 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
10346 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
10347 i40e_disable_pf_switch_lb(pf);
10349 i40e_enable_pf_switch_lb(pf);
10353 * i40e_reconstitute_veb - rebuild the VEB and VSIs connected to it
10354 * @veb: pointer to the VEB instance
10356 * This is a function that builds the attached VSIs. We track the connections
10357 * through our own index numbers because the seid's from the HW could change
10358 * across the reset.
10360 static int i40e_reconstitute_veb(struct i40e_veb *veb)
10362 struct i40e_vsi *ctl_vsi = NULL;
10363 struct i40e_pf *pf = veb->pf;
10364 struct i40e_vsi *vsi;
10367 /* As we do not maintain a PV (port virtualizer) switch element,
10368 * there can be only one non-floating VEB that has an uplink to the MAC SEID,
10369 * and its control VSI is the main one.
10371 if (WARN_ON(veb->uplink_seid && veb->uplink_seid != pf->mac_seid)) {
10372 dev_err(&pf->pdev->dev,
10373 "Invalid uplink SEID for VEB %d\n", veb->idx);
10377 if (veb->uplink_seid == pf->mac_seid) {
10378 /* Check that the LAN VSI has VEB owning flag set */
10379 ctl_vsi = pf->vsi[pf->lan_vsi];
10381 if (WARN_ON(ctl_vsi->veb_idx != veb->idx ||
10382 !(ctl_vsi->flags & I40E_VSI_FLAG_VEB_OWNER))) {
10383 dev_err(&pf->pdev->dev,
10384 "Invalid control VSI for VEB %d\n", veb->idx);
10388 /* Add the control VSI to switch */
10389 ret = i40e_add_vsi(ctl_vsi);
10391 dev_err(&pf->pdev->dev,
10392 "Rebuild of owner VSI for VEB %d failed: %d\n",
10397 i40e_vsi_reset_stats(ctl_vsi);
10400 /* create the VEB in the switch and move the VSI onto the VEB */
10401 ret = i40e_add_veb(veb, ctl_vsi);
10405 if (veb->uplink_seid) {
10406 if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
10407 veb->bridge_mode = BRIDGE_MODE_VEB;
10409 veb->bridge_mode = BRIDGE_MODE_VEPA;
10410 i40e_config_bridge_mode(veb);
10413 /* create the remaining VSIs attached to this VEB */
10414 i40e_pf_for_each_vsi(pf, v, vsi) {
10415 if (vsi == ctl_vsi)
10418 if (vsi->veb_idx == veb->idx) {
10419 vsi->uplink_seid = veb->seid;
10420 ret = i40e_add_vsi(vsi);
10422 dev_info(&pf->pdev->dev,
10423 "rebuild of vsi_idx %d failed: %d\n",
10427 i40e_vsi_reset_stats(vsi);
10435 * i40e_get_capabilities - get info about the HW
10436 * @pf: the PF struct
10437 * @list_type: AQ capability to be queried
10439 static int i40e_get_capabilities(struct i40e_pf *pf,
10440 enum i40e_admin_queue_opc list_type)
10442 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
10447 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
10449 cap_buf = kzalloc(buf_len, GFP_KERNEL);
10453 /* this loads the data into the hw struct for us */
10454 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
10455 &data_size, list_type,
10457 /* data loaded, buffer no longer needed */
10460 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
10461 /* retry with a larger buffer */
10462 buf_len = data_size;
10463 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
10464 dev_info(&pf->pdev->dev,
10465 "capability discovery failed, err %pe aq_err %s\n",
10467 i40e_aq_str(&pf->hw,
10468 pf->hw.aq.asq_last_status));
10473 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
10474 if (list_type == i40e_aqc_opc_list_func_capabilities) {
10475 dev_info(&pf->pdev->dev,
10476 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
10477 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
10478 pf->hw.func_caps.num_msix_vectors,
10479 pf->hw.func_caps.num_msix_vectors_vf,
10480 pf->hw.func_caps.fd_filters_guaranteed,
10481 pf->hw.func_caps.fd_filters_best_effort,
10482 pf->hw.func_caps.num_tx_qp,
10483 pf->hw.func_caps.num_vsis);
10484 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
10485 dev_info(&pf->pdev->dev,
10486 "switch_mode=0x%04x, function_valid=0x%08x\n",
10487 pf->hw.dev_caps.switch_mode,
10488 pf->hw.dev_caps.valid_functions);
10489 dev_info(&pf->pdev->dev,
10490 "SR-IOV=%d, num_vfs for all function=%u\n",
10491 pf->hw.dev_caps.sr_iov_1_1,
10492 pf->hw.dev_caps.num_vfs);
10493 dev_info(&pf->pdev->dev,
10494 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
10495 pf->hw.dev_caps.num_vsis,
10496 pf->hw.dev_caps.num_rx_qp,
10497 pf->hw.dev_caps.num_tx_qp);
10500 if (list_type == i40e_aqc_opc_list_func_capabilities) {
10501 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
10502 + pf->hw.func_caps.num_vfs)
10503 if (pf->hw.revision_id == 0 &&
10504 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
10505 dev_info(&pf->pdev->dev,
10506 "got num_vsis %d, setting num_vsis to %d\n",
10507 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
10508 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
10514 static int i40e_vsi_clear(struct i40e_vsi *vsi);
10517 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
10518 * @pf: board private structure
10520 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
10522 struct i40e_vsi *vsi;
10524 /* quick workaround for an NVM issue that leaves a critical register
10525 * uninitialized
10527 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
10528 static const u32 hkey[] = {
10529 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
10530 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
10531 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
10535 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
10536 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
10539 if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
10542 /* find existing VSI and see if it needs configuring */
10543 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10545 /* create a new VSI if none exists */
10547 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
10548 pf->vsi[pf->lan_vsi]->seid, 0);
10550 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
10551 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
10552 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
10557 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
10561 * i40e_fdir_teardown - release the Flow Director resources
10562 * @pf: board private structure
10564 static void i40e_fdir_teardown(struct i40e_pf *pf)
10566 struct i40e_vsi *vsi;
10568 i40e_fdir_filter_exit(pf);
10569 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10571 i40e_vsi_release(vsi);
10575 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
10576 * @vsi: PF main vsi
10577 * @seid: seid of main or channel VSIs
10579 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
10580 * existed before reset
10582 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
10584 struct i40e_cloud_filter *cfilter;
10585 struct i40e_pf *pf = vsi->back;
10586 struct hlist_node *node;
10589 /* Add cloud filters back if they exist */
10590 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
10592 if (cfilter->seid != seid)
10595 if (cfilter->dst_port)
10596 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
10599 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
10602 dev_dbg(&pf->pdev->dev,
10603 "Failed to rebuild cloud filter, err %pe aq_err %s\n",
10605 i40e_aq_str(&pf->hw,
10606 pf->hw.aq.asq_last_status));
10614 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
10615 * @vsi: PF main vsi
10617 * Rebuilds channel VSIs if they existed before reset
10619 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
10621 struct i40e_channel *ch, *ch_tmp;
10624 if (list_empty(&vsi->ch_list))
10627 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
10628 if (!ch->initialized)
10630 /* Proceed with creation of channel (VMDq2) VSI */
10631 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
10632 if (ret) {
10633 dev_info(&vsi->back->pdev->dev,
10634 "failed to rebuild channels using uplink_seid %u\n",
10635 vsi->uplink_seid);
10636 return ret;
10637 }
10638 /* Reconfigure TX queues using QTX_CTL register */
10639 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
10640 if (ret) {
10641 dev_info(&vsi->back->pdev->dev,
10642 "failed to configure TX rings for channel %u\n",
10643 ch->seid);
10644 return ret;
10645 }
10646 /* update 'next_base_queue' */
10647 vsi->next_base_queue = vsi->next_base_queue +
10648 ch->num_queue_pairs;
10649 if (ch->max_tx_rate) {
10650 u64 credits = ch->max_tx_rate;
10652 if (i40e_set_bw_limit(vsi, ch->seid,
10653 ch->max_tx_rate))
10654 return -EINVAL;
10656 do_div(credits, I40E_BW_CREDIT_DIVISOR);
10657 dev_dbg(&vsi->back->pdev->dev,
10658 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10659 ch->max_tx_rate,
10660 credits,
10661 ch->seid);
10662 }
10663 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
10664 if (ret) {
10665 dev_dbg(&vsi->back->pdev->dev,
10666 "Failed to rebuild cloud filters for channel VSI %u\n",
10667 ch->seid);
10668 return ret;
10669 }
10675 * i40e_clean_xps_state - clean xps state for every tx_ring
10676 * @vsi: ptr to the VSI
10678 static void i40e_clean_xps_state(struct i40e_vsi *vsi)
10683 for (i = 0; i < vsi->num_queue_pairs; i++)
10684 if (vsi->tx_rings[i])
10685 clear_bit(__I40E_TX_XPS_INIT_DONE,
10686 vsi->tx_rings[i]->state);
10690 * i40e_prep_for_reset - prep for the core to reset
10691 * @pf: board private structure
10693 * Close up the VFs and other things in prep for PF Reset.
10695 static void i40e_prep_for_reset(struct i40e_pf *pf)
10697 struct i40e_hw *hw = &pf->hw;
10698 struct i40e_vsi *vsi;
10702 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
10703 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
10705 if (i40e_check_asq_alive(&pf->hw))
10706 i40e_vc_notify_reset(pf);
10708 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
10710 /* quiesce the VSIs and their queues that are not already DOWN */
10711 i40e_pf_quiesce_all_vsi(pf);
10713 i40e_pf_for_each_vsi(pf, v, vsi) {
10714 i40e_clean_xps_state(vsi);
10718 i40e_shutdown_adminq(&pf->hw);
10720 /* call shutdown HMC */
10721 if (hw->hmc.hmc_obj) {
10722 ret = i40e_shutdown_lan_hmc(hw);
10724 dev_warn(&pf->pdev->dev,
10725 "shutdown_lan_hmc failed: %d\n", ret);
10728 /* Save the current PTP time so that we can restore the time after the
10731 i40e_ptp_save_hw_time(pf);

/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 **/
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = 0xff;
	dv.minor_version = 0xff;
	dv.build_version = 0xff;
	dv.subbuild_version = 0;
	strscpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}
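
/*
 * Note (interpretation, not from the datasheet): the numeric version sent
 * above is the fixed 0xff.0xff.0xff sentinel, so the meaningful identity the
 * firmware sees is the running kernel release (UTS_RELEASE) carried in
 * driver_string rather than a driver-private version number.
 */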
/**
 * i40e_get_oem_version - get OEM specific version information
 * @hw: pointer to the hardware structure
 **/
static void i40e_get_oem_version(struct i40e_hw *hw)
{
	u16 block_offset = 0xffff;
	u16 block_length = 0;
	u16 capabilities = 0;
	u16 gen_snap = 0;
	u16 release = 0;

#define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
#define I40E_NVM_OEM_LENGTH_OFFSET		0x00
#define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
#define I40E_NVM_OEM_GEN_OFFSET			0x02
#define I40E_NVM_OEM_RELEASE_OFFSET		0x03
#define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
#define I40E_NVM_OEM_LENGTH			3

	/* Check if pointer to OEM version block is valid. */
	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
	if (block_offset == 0xffff)
		return;

	/* Check if OEM version block has correct length. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
			   &block_length);
	if (block_length < I40E_NVM_OEM_LENGTH)
		return;

	/* Check if OEM version format is as expected. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
			   &capabilities);
	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
		return;

	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
			   &gen_snap);
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
			   &release);
	hw->nvm.oem_ver =
		FIELD_PREP(I40E_OEM_GEN_MASK | I40E_OEM_SNAP_MASK, gen_snap) |
		FIELD_PREP(I40E_OEM_RELEASE_MASK, release);
	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
}
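
/*
 * Resulting layout (a sketch, assuming the usual I40E_OEM_*_MASK
 * definitions): oem_ver packs the OEM gen/snap bytes from gen_snap into its
 * upper fields and the 16-bit release number into the low field, while
 * eetrack is forced to I40E_OEM_EETRACK_ID so version reporting can tell an
 * OEM image apart from a regular NVM image.
 */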
/**
 * i40e_reset - wait for core reset to finish, reset the PF if a CORER was not seen
 * @pf: board private structure
 **/
static int i40e_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, pf->state);
		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	}
	return ret;
}

/**
 * i40e_rebuild - rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
10824 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
10826 const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
10827 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10828 struct i40e_hw *hw = &pf->hw;
10829 struct i40e_veb *veb;
10834 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
10835 is_recovery_mode_reported)
10836 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
10838 if (test_bit(__I40E_DOWN, pf->state) &&
10839 !test_bit(__I40E_RECOVERY_MODE, pf->state))
10840 goto clear_recovery;
10841 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
10843 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
10844 ret = i40e_init_adminq(&pf->hw);
10846 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n",
10848 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10849 goto clear_recovery;
10851 i40e_get_oem_version(&pf->hw);
10853 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
10854 /* The following delay is necessary for firmware update. */
10858 /* re-verify the eeprom if we just had an EMP reset */
10859 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
10860 i40e_verify_eeprom(pf);
10862 /* if we are going out of or into recovery mode we have to act
10863 * accordingly with regard to resources initialization
10864 * and deinitialization
10866 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10867 if (i40e_get_capabilities(pf,
10868 i40e_aqc_opc_list_func_capabilities))
10871 if (is_recovery_mode_reported) {
10872 /* we're staying in recovery mode so we'll reinitialize
10875 if (i40e_setup_misc_vector_for_recovery_mode(pf))
10878 if (!lock_acquired)
10880 /* we're going out of recovery mode so we'll free
10881 * the IRQ allocated specifically for recovery mode
10882 * and restore the interrupt scheme
10884 free_irq(pf->pdev->irq, pf);
10885 i40e_clear_interrupt_scheme(pf);
10886 if (i40e_restore_interrupt_scheme(pf))
10890 /* tell the firmware that we're starting */
10891 i40e_send_version(pf);
10893 /* bail out in case recovery mode was detected, as there is
10894 * no need for further configuration.
10899 i40e_clear_pxe_mode(hw);
10900 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
10902 goto end_core_reset;
10904 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10905 hw->func_caps.num_rx_qp, 0, 0);
10907 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
10908 goto end_core_reset;
10910 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10912 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
10913 goto end_core_reset;
10916 #ifdef CONFIG_I40E_DCB
10917 /* Enable FW to write a default DCB config on link-up
10918 * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
10919 * is not supported with new link speed
10921 if (i40e_is_tc_mqprio_enabled(pf)) {
10922 i40e_aq_set_dcb_parameters(hw, false, NULL);
10924 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
10925 (hw->phy.link_info.link_speed &
10926 (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
10927 i40e_aq_set_dcb_parameters(hw, false, NULL);
10928 dev_warn(&pf->pdev->dev,
10929 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
10930 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
10932 i40e_aq_set_dcb_parameters(hw, true, NULL);
10933 ret = i40e_init_pf_dcb(pf);
10935 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
10937 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
10938 /* Continue without DCB enabled */
10943 #endif /* CONFIG_I40E_DCB */
10944 if (!lock_acquired)
10946 ret = i40e_setup_pf_switch(pf, reinit, true);
10950 /* The driver only wants link up/down and module qualification
10951 * reports from firmware. Note the negative logic.
10953 ret = i40e_aq_set_phy_int_mask(&pf->hw,
10954 ~(I40E_AQ_EVENT_LINK_UPDOWN |
10955 I40E_AQ_EVENT_MEDIA_NA |
10956 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
10958 dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
10960 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10962 /* Rebuild the VSIs and VEBs that existed before reset.
10963 * They are still in our local switch element arrays, so only
10964 * need to rebuild the switch model in the HW.
10966 * If there were VEBs but the reconstitution failed, we'll try
10967 * to recover minimal use by getting the basic PF VSI working.
10969 if (vsi->uplink_seid != pf->mac_seid) {
10970 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
10973 i40e_pf_for_each_veb(pf, v, veb) {
10974 ret = i40e_reconstitute_veb(veb);
10978 /* If Main VEB failed, we're in deep doodoo,
10979 * so give up rebuilding the switch and set up
10980 * for minimal rebuild of PF VSI.
10981 * If orphan failed, we'll report the error
10982 * but try to keep going.
10984 if (veb->uplink_seid == pf->mac_seid) {
10985 dev_info(&pf->pdev->dev,
10986 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
10988 vsi->uplink_seid = pf->mac_seid;
10990 } else if (veb->uplink_seid == 0) {
10991 dev_info(&pf->pdev->dev,
10992 "rebuild of orphan VEB failed: %d\n",
10998 if (vsi->uplink_seid == pf->mac_seid) {
10999 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
11000 /* no VEB, so rebuild only the Main VSI */
11001 ret = i40e_add_vsi(vsi);
11003 dev_info(&pf->pdev->dev,
11004 "rebuild of Main VSI failed: %d\n", ret);
11009 if (vsi->mqprio_qopt.max_rate[0]) {
11010 u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
11011 vsi->mqprio_qopt.max_rate[0]);
11014 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
11018 credits = max_tx_rate;
11019 do_div(credits, I40E_BW_CREDIT_DIVISOR);
11020 dev_dbg(&vsi->back->pdev->dev,
11021 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
11027 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
11031 /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs
11032 * for this main VSI if they exist
11034 ret = i40e_rebuild_channels(vsi);
11038 /* Reconfigure hardware for allowing smaller MSS in the case
11039 * of TSO, so that we avoid the MDD being fired and causing
11040 * a reset in the case of small MSS+TSO.
11042 #define I40E_REG_MSS 0x000E64DC
11043 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
11044 #define I40E_64BYTE_MSS 0x400000
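	/*
	 * I40E_64BYTE_MSS is 64 << 16: the minimum-MSS field lives in bits
	 * 25:16 of this register (I40E_REG_MSS_MIN_MASK), so the check below
	 * lowers the hardware-enforced minimum MSS to 64 bytes whenever the
	 * power-on default is larger.
	 */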
11045 val = rd32(hw, I40E_REG_MSS);
11046 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
11047 val &= ~I40E_REG_MSS_MIN_MASK;
11048 val |= I40E_64BYTE_MSS;
11049 wr32(hw, I40E_REG_MSS, val);
11052 if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) {
11054 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
11056 dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
11058 i40e_aq_str(&pf->hw,
11059 pf->hw.aq.asq_last_status));
11061 /* reinit the misc interrupt */
11062 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
11063 ret = i40e_setup_misc_vector(pf);
11068 /* Add a filter to drop all Flow control frames from any VSI from being
11069 * transmitted. By doing so we stop a malicious VF from sending out
11070 * PAUSE or PFC frames and potentially controlling traffic for other
11072 * The FW can still send Flow control frames if enabled.
11074 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
11075 pf->main_vsi_seid);
11077 /* restart the VSIs that were rebuilt and running before the reset */
11078 i40e_pf_unquiesce_all_vsi(pf);
11080 /* Release the RTNL lock before we start resetting VFs */
11081 if (!lock_acquired)
11084 /* Restore promiscuous settings */
11085 ret = i40e_set_promiscuous(pf, pf->cur_promisc);
11087 dev_warn(&pf->pdev->dev,
11088 "Failed to restore promiscuous setting: %s, err %pe aq_err %s\n",
11089 pf->cur_promisc ? "on" : "off",
11091 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11093 i40e_reset_all_vfs(pf, true);
11095 /* tell the firmware that we're starting */
11096 i40e_send_version(pf);
11098 /* We've already released the lock, so don't do it again */
11099 goto end_core_reset;
11102 if (!lock_acquired)
11105 clear_bit(__I40E_RESET_FAILED, pf->state);
11107 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
11108 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);

/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired)
{
	int ret;

	if (test_bit(__I40E_IN_REMOVE, pf->state))
		return;
	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_reset(pf);
	if (!ret)
		i40e_rebuild(pf, reinit, lock_acquired);
}

/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
	i40e_prep_for_reset(pf);
	i40e_reset_and_rebuild(pf, false, lock_acquired);
}
11150 * i40e_handle_mdd_event
11151 * @pf: pointer to the PF structure
11153 * Called from the MDD irq handler to identify possibly malicious vfs
11155 static void i40e_handle_mdd_event(struct i40e_pf *pf)
11157 struct i40e_hw *hw = &pf->hw;
11158 bool mdd_detected = false;
11159 struct i40e_vf *vf;
11163 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
11166 /* find what triggered the MDD event */
11167 reg = rd32(hw, I40E_GL_MDET_TX);
11168 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
11169 u8 pf_num = FIELD_GET(I40E_GL_MDET_TX_PF_NUM_MASK, reg);
11170 u16 vf_num = FIELD_GET(I40E_GL_MDET_TX_VF_NUM_MASK, reg);
11171 u8 event = FIELD_GET(I40E_GL_MDET_TX_EVENT_MASK, reg);
11172 u16 queue = FIELD_GET(I40E_GL_MDET_TX_QUEUE_MASK, reg) -
11173 pf->hw.func_caps.base_queue;
11174 if (netif_msg_tx_err(pf))
11175 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
11176 event, queue, pf_num, vf_num);
11177 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
11178 mdd_detected = true;
11180 reg = rd32(hw, I40E_GL_MDET_RX);
11181 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
11182 u8 func = FIELD_GET(I40E_GL_MDET_RX_FUNCTION_MASK, reg);
11183 u8 event = FIELD_GET(I40E_GL_MDET_RX_EVENT_MASK, reg);
11184 u16 queue = FIELD_GET(I40E_GL_MDET_RX_QUEUE_MASK, reg) -
11185 pf->hw.func_caps.base_queue;
11186 if (netif_msg_rx_err(pf))
11187 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
11188 event, queue, func);
11189 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
11190 mdd_detected = true;
11193 if (mdd_detected) {
11194 reg = rd32(hw, I40E_PF_MDET_TX);
11195 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
11196 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
11197 dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
11199 reg = rd32(hw, I40E_PF_MDET_RX);
11200 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
11201 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
11202 dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
11206 /* see if one of the VFs needs its hand slapped */
11207 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
11209 reg = rd32(hw, I40E_VP_MDET_TX(i));
11210 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
11211 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
11212 vf->num_mdd_events++;
11213 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
11215 dev_info(&pf->pdev->dev,
11216 "Use PF Control I/F to re-enable the VF\n");
11217 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
11220 reg = rd32(hw, I40E_VP_MDET_RX(i));
11221 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
11222 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
11223 vf->num_mdd_events++;
11224 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
11226 dev_info(&pf->pdev->dev,
11227 "Use PF Control I/F to re-enable the VF\n");
11228 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
11232 /* re-enable mdd interrupt cause */
11233 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
11234 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
11235 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
11236 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
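	/*
	 * Presumably the pending flag is cleared before the MAL_DETECT cause
	 * is re-armed in ICR0_ENA so that an event raised right after this
	 * point schedules another pass through this handler rather than
	 * being dropped.
	 */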
11241 * i40e_service_task - Run the driver's async subtasks
11242 * @work: pointer to work_struct containing our data
11244 static void i40e_service_task(struct work_struct *work)
11246 struct i40e_pf *pf = container_of(work,
11249 unsigned long start_time = jiffies;
11251 /* don't bother with service tasks if a reset is in progress */
11252 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
11253 test_bit(__I40E_SUSPENDED, pf->state))
11256 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
11259 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
11260 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
11261 i40e_sync_filters_subtask(pf);
11262 i40e_reset_subtask(pf);
11263 i40e_handle_mdd_event(pf);
11264 i40e_vc_process_vflr_event(pf);
11265 i40e_watchdog_subtask(pf);
11266 i40e_fdir_reinit_subtask(pf);
11267 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
11268 /* Client subtask will reopen next time through. */
11269 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
11272 i40e_client_subtask(pf);
11273 if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
11275 i40e_notify_client_of_l2_param_changes(
11276 pf->vsi[pf->lan_vsi]);
11278 i40e_sync_filters_subtask(pf);
11280 i40e_reset_subtask(pf);
11283 i40e_clean_adminq_subtask(pf);
11285 /* flush memory to make sure state is correct before next watchdog */
11286 smp_mb__before_atomic();
11287 clear_bit(__I40E_SERVICE_SCHED, pf->state);
11289 /* If the tasks have taken longer than one timer cycle or there
11290 * is more work to be done, reschedule the service task now
11291 * rather than wait for the timer to tick again.
11293 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
11294 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
11295 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
11296 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
11297 i40e_service_event_schedule(pf);

/**
 * i40e_service_timer - timer callback
 * @t: timer list pointer
 **/
static void i40e_service_timer(struct timer_list *t)
{
	struct i40e_pf *pf = from_timer(pf, t, service_timer);

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}
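
/*
 * Roughly speaking, the timer callback only re-arms itself and kicks the
 * service task; the actual work (watchdog, AdminQ draining, reset handling)
 * runs from i40e_service_task() in process context, keeping this softirq
 * path short.
 */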
11314 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
11315 * @vsi: the VSI being configured
11317 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
11319 struct i40e_pf *pf = vsi->back;
11321 switch (vsi->type) {
11322 case I40E_VSI_MAIN:
11323 vsi->alloc_queue_pairs = pf->num_lan_qps;
11324 if (!vsi->num_tx_desc)
11325 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11326 I40E_REQ_DESCRIPTOR_MULTIPLE);
11327 if (!vsi->num_rx_desc)
11328 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11329 I40E_REQ_DESCRIPTOR_MULTIPLE);
11330 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
11331 vsi->num_q_vectors = pf->num_lan_msix;
11333 vsi->num_q_vectors = 1;
11337 case I40E_VSI_FDIR:
11338 vsi->alloc_queue_pairs = 1;
11339 vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
11340 I40E_REQ_DESCRIPTOR_MULTIPLE);
11341 vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
11342 I40E_REQ_DESCRIPTOR_MULTIPLE);
11343 vsi->num_q_vectors = pf->num_fdsb_msix;
11346 case I40E_VSI_VMDQ2:
11347 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
11348 if (!vsi->num_tx_desc)
11349 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11350 I40E_REQ_DESCRIPTOR_MULTIPLE);
11351 if (!vsi->num_rx_desc)
11352 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11353 I40E_REQ_DESCRIPTOR_MULTIPLE);
11354 vsi->num_q_vectors = pf->num_vmdq_msix;
11357 case I40E_VSI_SRIOV:
11358 vsi->alloc_queue_pairs = pf->num_vf_qps;
11359 if (!vsi->num_tx_desc)
11360 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11361 I40E_REQ_DESCRIPTOR_MULTIPLE);
11362 if (!vsi->num_rx_desc)
11363 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
11364 I40E_REQ_DESCRIPTOR_MULTIPLE);
11372 if (is_kdump_kernel()) {
11373 vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS;
11374 vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS;
11381 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
11382 * @vsi: VSI pointer
11383 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
11385 * On error: returns error code (negative)
11386 * On success: returns 0
11388 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
11390 struct i40e_ring **next_rings;
11394 /* allocate memory for both Tx, XDP Tx and Rx ring pointers */
11395 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
11396 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
11397 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
11398 if (!vsi->tx_rings)
11400 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
11401 if (i40e_enabled_xdp_vsi(vsi)) {
11402 vsi->xdp_rings = next_rings;
11403 next_rings += vsi->alloc_queue_pairs;
11405 vsi->rx_rings = next_rings;
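
	/*
	 * Layout of the single allocation above: tx_rings[] first, then
	 * xdp_rings[] when XDP is enabled, then rx_rings[]; rx_rings and
	 * xdp_rings are just offsets into the tx_rings buffer, which is why
	 * i40e_vsi_free_arrays() only ever kfree()s tx_rings.
	 */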
11407 if (alloc_qvectors) {
11408 /* allocate memory for q_vector pointers */
11409 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
11410 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
11411 if (!vsi->q_vectors) {
11419 kfree(vsi->tx_rings);
11424 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
11425 * @pf: board private structure
11426 * @type: type of VSI
11428 * On error: returns error code (negative)
11429 * On success: returns vsi index in PF (positive)
11431 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
11434 struct i40e_vsi *vsi;
11438 /* Need to protect the allocation of the VSIs at the PF level */
11439 mutex_lock(&pf->switch_mutex);
11441 /* VSI list may be fragmented if VSI creation/destruction has
11442 * been happening. We can afford to do a quick scan to look
11443 * for any free VSIs in the list.
11445 * find next empty vsi slot, looping back around if necessary
11448 while (i < pf->num_alloc_vsi && pf->vsi[i])
11450 if (i >= pf->num_alloc_vsi) {
11452 while (i < pf->next_vsi && pf->vsi[i])
11456 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
11457 vsi_idx = i; /* Found one! */
11460 goto unlock_pf; /* out of VSI slots! */
11462 pf->next_vsi = ++i;
11464 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
11471 set_bit(__I40E_VSI_DOWN, vsi->state);
11473 vsi->idx = vsi_idx;
11474 vsi->int_rate_limit = 0;
11475 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
11476 pf->rss_table_size : 64;
11477 vsi->netdev_registered = false;
11478 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
11479 hash_init(vsi->mac_filter_hash);
11480 vsi->irqs_ready = false;
11482 if (type == I40E_VSI_MAIN) {
11483 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
11484 if (!vsi->af_xdp_zc_qps)
11488 ret = i40e_set_num_rings_in_vsi(vsi);
11492 ret = i40e_vsi_alloc_arrays(vsi, true);
11496 /* Setup default MSIX irq handler for VSI */
11497 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
11499 /* Initialize VSI lock */
11500 spin_lock_init(&vsi->mac_filter_hash_lock);
11501 pf->vsi[vsi_idx] = vsi;
11506 bitmap_free(vsi->af_xdp_zc_qps);
11507 pf->next_vsi = i - 1;
11510 mutex_unlock(&pf->switch_mutex);

/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	/* rx_rings and xdp_rings share the tx_rings allocation */
	vsi->rx_rings = NULL;
	vsi->xdp_rings = NULL;
}

/**
 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
 * and lookup table
 * @vsi: Pointer to VSI structure
 **/
static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
{
	if (!vsi)
		return;
	kfree(vsi->rss_hkey_user);
	vsi->rss_hkey_user = NULL;
	kfree(vsi->rss_lut_user);
	vsi->rss_lut_user = NULL;
}
11553 * i40e_vsi_clear - Deallocate the VSI provided
11554 * @vsi: the VSI being un-configured
11556 static int i40e_vsi_clear(struct i40e_vsi *vsi)
11558 struct i40e_pf *pf;
11567 mutex_lock(&pf->switch_mutex);
11568 if (!pf->vsi[vsi->idx]) {
11569 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
11570 vsi->idx, vsi->idx, vsi->type);
11574 if (pf->vsi[vsi->idx] != vsi) {
11575 dev_err(&pf->pdev->dev,
11576 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
11577 pf->vsi[vsi->idx]->idx,
11578 pf->vsi[vsi->idx]->type,
11579 vsi->idx, vsi->type);
11583 /* updates the PF for this cleared vsi */
11584 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
11585 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
11587 bitmap_free(vsi->af_xdp_zc_qps);
11588 i40e_vsi_free_arrays(vsi, true);
11589 i40e_clear_rss_config_user(vsi);
11591 pf->vsi[vsi->idx] = NULL;
11592 if (vsi->idx < pf->next_vsi)
11593 pf->next_vsi = vsi->idx;
11596 mutex_unlock(&pf->switch_mutex);

/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/
static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings && vsi->tx_rings[0]) {
		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
			kfree_rcu(vsi->tx_rings[i], rcu);
			WRITE_ONCE(vsi->tx_rings[i], NULL);
			WRITE_ONCE(vsi->rx_rings[i], NULL);
			if (vsi->xdp_rings)
				WRITE_ONCE(vsi->xdp_rings[i], NULL);
		}
	}
}
11623 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
11624 * @vsi: the VSI being configured
11626 static int i40e_alloc_rings(struct i40e_vsi *vsi)
11628 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
11629 struct i40e_pf *pf = vsi->back;
11630 struct i40e_ring *ring;
11632 /* Set basic values in the rings to be used later during open() */
11633 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
11634 /* allocate space for both Tx and Rx in one shot */
11635 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
11639 ring->queue_index = i;
11640 ring->reg_idx = vsi->base_queue + i;
11641 ring->ring_active = false;
11643 ring->netdev = vsi->netdev;
11644 ring->dev = &pf->pdev->dev;
11645 ring->count = vsi->num_tx_desc;
11648 if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps))
11649 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
11650 ring->itr_setting = pf->tx_itr_default;
11651 WRITE_ONCE(vsi->tx_rings[i], ring++);
11653 if (!i40e_enabled_xdp_vsi(vsi))
11656 ring->queue_index = vsi->alloc_queue_pairs + i;
11657 ring->reg_idx = vsi->base_queue + ring->queue_index;
11658 ring->ring_active = false;
11660 ring->netdev = NULL;
11661 ring->dev = &pf->pdev->dev;
11662 ring->count = vsi->num_tx_desc;
11665 if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps))
11666 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
11667 set_ring_xdp(ring);
11668 ring->itr_setting = pf->tx_itr_default;
11669 WRITE_ONCE(vsi->xdp_rings[i], ring++);
11672 ring->queue_index = i;
11673 ring->reg_idx = vsi->base_queue + i;
11674 ring->ring_active = false;
11676 ring->netdev = vsi->netdev;
11677 ring->dev = &pf->pdev->dev;
11678 ring->count = vsi->num_rx_desc;
11681 ring->itr_setting = pf->rx_itr_default;
11682 WRITE_ONCE(vsi->rx_rings[i], ring);
11688 i40e_vsi_clear_rings(vsi);
/**
 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
 * @pf: board private structure
 * @vectors: the number of MSI-X vectors to request
 *
 * Returns the number of vectors reserved, or error
 **/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					I40E_MIN_MSIX, vectors);
	if (vectors < 0) {
		dev_info(&pf->pdev->dev,
			 "MSI-X vector reservation failed: %d\n", vectors);
		vectors = 0;
	}
	return vectors;
}
11713 * i40e_init_msix - Setup the MSIX capability
11714 * @pf: board private structure
11716 * Work with the OS to set up the MSIX vectors needed.
11718 * Returns the number of vectors reserved or negative on failure
11720 static int i40e_init_msix(struct i40e_pf *pf)
11722 struct i40e_hw *hw = &pf->hw;
11723 int cpus, extra_vectors;
11727 int iwarp_requested = 0;
11729 if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
11732 /* The number of vectors we'll request will be comprised of:
11733 * - Add 1 for "other" cause for Admin Queue events, etc.
11734 * - The number of LAN queue pairs
11735 * - Queues being used for RSS.
11736 * We don't need as many as max_rss_size vectors.
11737 * use rss_size instead in the calculation since that
11738 * is governed by number of cpus in the system.
11739 * - assumes symmetric Tx/Rx pairing
11740 * - The number of VMDq pairs
11741 * - The CPU count within the NUMA node if iWARP is enabled
11742 * Once we count this up, try the request.
11744 * If we can't get what we want, we'll simplify to nearly nothing
11745 * and try again. If that still fails, we punt.
11747 vectors_left = hw->func_caps.num_msix_vectors;
11750 /* reserve one vector for miscellaneous handler */
11751 if (vectors_left) {
11756 /* reserve some vectors for the main PF traffic queues. Initially we
11757 * only reserve at most 50% of the available vectors, in the case that
11758 * the number of online CPUs is large. This ensures that we can enable
11759 * extra features as well. Once we've enabled the other features, we
11760 * will use any remaining vectors to reach as close as we can to the
11761 * number of online CPUs.
11763 cpus = num_online_cpus();
11764 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
11765 vectors_left -= pf->num_lan_msix;
11767 /* reserve one vector for sideband flow director */
11768 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
11769 if (vectors_left) {
11770 pf->num_fdsb_msix = 1;
11774 pf->num_fdsb_msix = 0;
11778 /* can we reserve enough for iWARP? */
11779 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
11780 iwarp_requested = pf->num_iwarp_msix;
11783 pf->num_iwarp_msix = 0;
11784 else if (vectors_left < pf->num_iwarp_msix)
11785 pf->num_iwarp_msix = 1;
11786 v_budget += pf->num_iwarp_msix;
11787 vectors_left -= pf->num_iwarp_msix;
11790 /* any vectors left over go for VMDq support */
11791 if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags)) {
11792 if (!vectors_left) {
11793 pf->num_vmdq_msix = 0;
11794 pf->num_vmdq_qps = 0;
11796 int vmdq_vecs_wanted =
11797 pf->num_vmdq_vsis * pf->num_vmdq_qps;
11799 min_t(int, vectors_left, vmdq_vecs_wanted);
11801 /* if we're short on vectors for what's desired, we limit
11802 * the queues per vmdq. If this is still more than are
11803 * available, the user will need to change the number of
11804 * queues/vectors used by the PF later with the ethtool
11807 if (vectors_left < vmdq_vecs_wanted) {
11808 pf->num_vmdq_qps = 1;
11809 vmdq_vecs_wanted = pf->num_vmdq_vsis;
11810 vmdq_vecs = min_t(int,
11814 pf->num_vmdq_msix = pf->num_vmdq_qps;
11816 v_budget += vmdq_vecs;
11817 vectors_left -= vmdq_vecs;
11821 /* On systems with a large number of SMP cores, we previously limited
11822 * the number of vectors for num_lan_msix to be at most 50% of the
11823 * available vectors, to allow for other features. Now, we add back
11824 * the remaining vectors. However, we ensure that the total
11825 * num_lan_msix will not exceed num_online_cpus(). To do this, we
11826 * calculate the number of vectors we can add without going over the
11827 * cap of CPUs. For systems with a small number of CPUs this will be
11830 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
11831 pf->num_lan_msix += extra_vectors;
11832 vectors_left -= extra_vectors;
11834 WARN(vectors_left < 0,
11835 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
11837 v_budget += pf->num_lan_msix;
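	/*
	 * At this point v_budget holds the full request: one misc/AdminQ
	 * vector, the (possibly topped-up) LAN vectors, and whatever FDir SB,
	 * iWARP and VMDq vectors fit. The kernel may still grant fewer; the
	 * v_actual checks below redistribute whatever was actually reserved.
	 */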
11838 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
11840 if (!pf->msix_entries)
11843 for (i = 0; i < v_budget; i++)
11844 pf->msix_entries[i].entry = i;
11845 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
11847 if (v_actual < I40E_MIN_MSIX) {
11848 clear_bit(I40E_FLAG_MSIX_ENA, pf->flags);
11849 kfree(pf->msix_entries);
11850 pf->msix_entries = NULL;
11851 pci_disable_msix(pf->pdev);
11854 } else if (v_actual == I40E_MIN_MSIX) {
11855 /* Adjust for minimal MSIX use */
11856 pf->num_vmdq_vsis = 0;
11857 pf->num_vmdq_qps = 0;
11858 pf->num_lan_qps = 1;
11859 pf->num_lan_msix = 1;
11861 } else if (v_actual != v_budget) {
11862 /* If we have limited resources, we will start with no vectors
11863 * for the special features and then allocate vectors to some
11864 * of these features based on the policy and at the end disable
11865 * the features that did not get any vectors.
11869 dev_info(&pf->pdev->dev,
11870 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
11871 v_actual, v_budget);
11872 /* reserve the misc vector */
11873 vec = v_actual - 1;
11875 /* Scale vector usage down */
11876 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
11877 pf->num_vmdq_vsis = 1;
11878 pf->num_vmdq_qps = 1;
11880 /* partition out the remaining vectors */
11883 pf->num_lan_msix = 1;
11886 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
11887 pf->num_lan_msix = 1;
11888 pf->num_iwarp_msix = 1;
11890 pf->num_lan_msix = 2;
11894 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
11895 pf->num_iwarp_msix = min_t(int, (vec / 3),
11897 pf->num_vmdq_vsis = min_t(int, (vec / 3),
11898 I40E_DEFAULT_NUM_VMDQ_VSI);
11900 pf->num_vmdq_vsis = min_t(int, (vec / 2),
11901 I40E_DEFAULT_NUM_VMDQ_VSI);
11903 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
11904 pf->num_fdsb_msix = 1;
11907 pf->num_lan_msix = min_t(int,
11908 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
11910 pf->num_lan_qps = pf->num_lan_msix;
11915 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && pf->num_fdsb_msix == 0) {
11916 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
11917 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
11918 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
11920 if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) && pf->num_vmdq_msix == 0) {
11921 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
11922 clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
11925 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) &&
11926 pf->num_iwarp_msix == 0) {
11927 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
11928 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
11930 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
11931 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
11933 pf->num_vmdq_msix * pf->num_vmdq_vsis,
11935 pf->num_iwarp_msix);
/**
 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the vsi struct
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);

	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll);

	/* tie q_vector and vsi together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}
11970 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
11971 * @vsi: the VSI being configured
11973 * We allocate one q_vector per queue interrupt. If allocation fails we
11976 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
11978 struct i40e_pf *pf = vsi->back;
11979 int err, v_idx, num_q_vectors;
11981 /* if not MSIX, give the one vector only to the LAN VSI */
11982 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
11983 num_q_vectors = vsi->num_q_vectors;
11984 else if (vsi == pf->vsi[pf->lan_vsi])
11989 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
11990 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
11999 i40e_free_q_vector(vsi, v_idx);
12005 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
12006 * @pf: board private structure to initialize
12008 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
12013 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
12014 vectors = i40e_init_msix(pf);
12016 clear_bit(I40E_FLAG_MSIX_ENA, pf->flags);
12017 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
12018 clear_bit(I40E_FLAG_RSS_ENA, pf->flags);
12019 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
12020 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
12021 clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
12022 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
12023 clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
12024 clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
12025 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
12027 /* rework the queue expectations without MSIX */
12028 i40e_determine_queue_usage(pf);
12032 if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags) &&
12033 test_bit(I40E_FLAG_MSI_ENA, pf->flags)) {
12034 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
12035 vectors = pci_enable_msi(pf->pdev);
12037 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
12039 clear_bit(I40E_FLAG_MSI_ENA, pf->flags);
12041 vectors = 1; /* one MSI or Legacy vector */
12044 if (!test_bit(I40E_FLAG_MSI_ENA, pf->flags) &&
12045 !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
12046 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
12048 /* set up vector assignment tracking */
12049 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
12050 pf->irq_pile = kzalloc(size, GFP_KERNEL);
12054 pf->irq_pile->num_entries = vectors;
12056 /* track first vector for misc interrupts, ignore return */
12057 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
12063 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
12064 * @pf: private board data structure
12066 * Restore the interrupt scheme that was cleared when we suspended the
12067 * device. This should be called during resume to re-allocate the q_vectors
12068 * and reacquire IRQs.
12070 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
12072 struct i40e_vsi *vsi;
	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
	 * scheme. We need to re-enable them here in order to attempt to
	 * re-acquire the MSI or MSI-X vectors
	 */
12079 set_bit(I40E_FLAG_MSI_ENA, pf->flags);
12080 set_bit(I40E_FLAG_MSIX_ENA, pf->flags);
12082 err = i40e_init_interrupt_scheme(pf);
12086 /* Now that we've re-acquired IRQs, we need to remap the vectors and
12087 * rings together again.
12089 i40e_pf_for_each_vsi(pf, i, vsi) {
12090 err = i40e_vsi_alloc_q_vectors(vsi);
12094 i40e_vsi_map_rings_to_vectors(vsi);
12097 err = i40e_setup_misc_vector(pf);
12101 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags))
12102 i40e_client_update_msix_info(pf);
12109 i40e_vsi_free_q_vectors(pf->vsi[i]);
12116 * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
12117 * non queue events in recovery mode
12118 * @pf: board private structure
12120 * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
12121 * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
12122 * This is handled differently than in recovery mode since no Tx/Rx resources
12123 * are being allocated.
12125 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
12129 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
12130 err = i40e_setup_misc_vector(pf);
12133 dev_info(&pf->pdev->dev,
12134 "MSI-X misc vector request failed, error %d\n",
12139 u32 flags = test_bit(I40E_FLAG_MSI_ENA, pf->flags) ? 0 : IRQF_SHARED;
12141 err = request_irq(pf->pdev->irq, i40e_intr, flags,
12145 dev_info(&pf->pdev->dev,
12146 "MSI/legacy misc vector request failed, error %d\n",
12150 i40e_enable_misc_int_causes(pf);
12151 i40e_irq_dynamic_enable_icr0(pf);
12158 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
12159 * @pf: board private structure
12161 * This sets up the handler for MSIX 0, which is used to manage the
12162 * non-queue interrupts, e.g. AdminQ and errors. This is not used
12163 * when in MSI or Legacy interrupt mode.
12165 static int i40e_setup_misc_vector(struct i40e_pf *pf)
12167 struct i40e_hw *hw = &pf->hw;
12170 /* Only request the IRQ once, the first time through. */
12171 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
12172 err = request_irq(pf->msix_entries[0].vector,
12173 i40e_intr, 0, pf->int_name, pf);
12175 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
12176 dev_info(&pf->pdev->dev,
12177 "request_irq for %s failed: %d\n",
12178 pf->int_name, err);
12183 i40e_enable_misc_int_causes(pf);
12185 /* associate no queues to the misc vector */
12186 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
12187 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
12191 i40e_irq_dynamic_enable_icr0(pf);

/**
 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
 * @vsi: Pointer to vsi structure
 * @seed: Buffer to store the hash keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 **/
12205 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
12206 u8 *lut, u16 lut_size)
12208 struct i40e_pf *pf = vsi->back;
12209 struct i40e_hw *hw = &pf->hw;
12213 ret = i40e_aq_get_rss_key(hw, vsi->id,
12214 (struct i40e_aqc_get_set_rss_key_data *)seed);
12216 dev_info(&pf->pdev->dev,
12217 "Cannot get RSS key, err %pe aq_err %s\n",
12219 i40e_aq_str(&pf->hw,
12220 pf->hw.aq.asq_last_status));
12226 bool pf_lut = vsi->type == I40E_VSI_MAIN;
12228 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
12230 dev_info(&pf->pdev->dev,
12231 "Cannot get RSS lut, err %pe aq_err %s\n",
12233 i40e_aq_str(&pf->hw,
12234 pf->hw.aq.asq_last_status));
12243 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
12244 * @vsi: Pointer to vsi structure
12245 * @seed: RSS hash seed
12246 * @lut: Lookup table
12247 * @lut_size: Lookup table size
12249 * Returns 0 on success, negative on failure
12251 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
12252 const u8 *lut, u16 lut_size)
12254 struct i40e_pf *pf = vsi->back;
12255 struct i40e_hw *hw = &pf->hw;
12256 u16 vf_id = vsi->vf_id;
12259 /* Fill out hash function seed */
12261 u32 *seed_dw = (u32 *)seed;
12263 if (vsi->type == I40E_VSI_MAIN) {
12264 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
12265 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
12266 } else if (vsi->type == I40E_VSI_SRIOV) {
12267 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
12268 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
12270 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
12275 u32 *lut_dw = (u32 *)lut;
12277 if (vsi->type == I40E_VSI_MAIN) {
12278 if (lut_size != I40E_HLUT_ARRAY_SIZE)
12280 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12281 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
12282 } else if (vsi->type == I40E_VSI_SRIOV) {
12283 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
12285 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12286 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
12288 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12297 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
12298 * @vsi: Pointer to VSI structure
12299 * @seed: Buffer to store the keys
12300 * @lut: Buffer to store the lookup table entries
12301 * @lut_size: Size of buffer to store the lookup table entries
12303 * Returns 0 on success, negative on failure
12305 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
12306 u8 *lut, u16 lut_size)
12308 struct i40e_pf *pf = vsi->back;
12309 struct i40e_hw *hw = &pf->hw;
12313 u32 *seed_dw = (u32 *)seed;
12315 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
12316 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
12319 u32 *lut_dw = (u32 *)lut;
12321 if (lut_size != I40E_HLUT_ARRAY_SIZE)
12323 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12324 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));

/**
 * i40e_config_rss - Configure RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
		return i40e_config_rss_aq(vsi, seed, lut, lut_size);

	return i40e_config_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
		return i40e_get_rss_aq(vsi, seed, lut, lut_size);

	return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}
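
/*
 * Both wrappers above pick the access method per call: parts advertising
 * I40E_HW_CAP_RSS_AQ (the X722 family, as far as this driver is concerned)
 * manage the RSS key/LUT through AdminQ commands, while older devices fall
 * back to direct register access, so callers never need to know which path
 * is in use.
 */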
/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
 * @pf: Pointer to board private structure
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 **/
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
		       u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}
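
/*
 * Example with illustrative values: rss_table_size = 512 and rss_size = 4
 * yields lut = 0,1,2,3,0,1,2,3,..., spreading hashed flows round-robin over
 * the four enabled queues.
 */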
12385 * i40e_pf_config_rss - Prepare for RSS if used
12386 * @pf: board private structure
12388 static int i40e_pf_config_rss(struct i40e_pf *pf)
12390 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12391 u8 seed[I40E_HKEY_ARRAY_SIZE];
12393 struct i40e_hw *hw = &pf->hw;
12398 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
12399 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
12400 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
12401 hena |= i40e_pf_get_default_rss_hena(pf);
12403 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
12404 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
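	/*
	 * HENA is a 64-bit mask of packet types eligible for RSS hashing; the
	 * hardware exposes it as two 32-bit registers, so the low word lands
	 * in PFQF_HENA(0) and the high word in PFQF_HENA(1).
	 */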
12406 /* Determine the RSS table size based on the hardware capabilities */
12407 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
12408 reg_val = (pf->rss_table_size == 512) ?
12409 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
12410 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
12411 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
12413 /* Determine the RSS size of the VSI */
12414 if (!vsi->rss_size) {
12416 /* If the firmware does something weird during VSI init, we
12417 * could end up with zero TCs. Check for that to avoid
12418 * divide-by-zero. It probably won't pass traffic, but it also
12421 qcount = vsi->num_queue_pairs /
12422 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
12423 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
12425 if (!vsi->rss_size)
12428 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
12432 /* Use user configured lut if there is one, otherwise use default */
12433 if (vsi->rss_lut_user)
12434 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
12436 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
12438 /* Use user configured hash key if there is one, otherwise
12441 if (vsi->rss_hkey_user)
12442 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
12444 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
12445 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
12452 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
12453 * @pf: board private structure
12454 * @queue_count: the requested queue count for rss.
12456 * returns 0 if rss is not enabled, if enabled returns the final rss queue
12457 * count which may be different from the requested queue count.
12458 * Note: expects to be called while under rtnl_lock()
12460 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
12462 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
12465 if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags))
12468 queue_count = min_t(int, queue_count, num_online_cpus());
12469 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
12471 if (queue_count != vsi->num_queue_pairs) {
12474 vsi->req_queue_pairs = queue_count;
12475 i40e_prep_for_reset(pf);
12476 if (test_bit(__I40E_IN_REMOVE, pf->state))
12477 return pf->alloc_rss_size;
12479 pf->alloc_rss_size = new_rss_size;
12481 i40e_reset_and_rebuild(pf, true, true);
12483 /* Discard the user configured hash keys and lut, if less
12484 * queues are enabled.
12486 if (queue_count < vsi->rss_size) {
12487 i40e_clear_rss_config_user(vsi);
12488 dev_dbg(&pf->pdev->dev,
12489 "discard user configured hash keys and lut\n");
12492 /* Reset vsi->rss_size, as number of enabled queues changed */
12493 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
12494 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
12496 i40e_pf_config_rss(pf);
12498 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
12499 vsi->req_queue_pairs, pf->rss_size_max);
12500 return pf->alloc_rss_size;
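
/*
 * Typical caller (a sketch): the ethtool set_channels path adjusts the
 * combined queue count and invokes i40e_reconfig_rss_queues() while holding
 * rtnl_lock(), which matches the locking expectation noted in the comment
 * above.
 */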
12504 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
12505 * @pf: board private structure
12507 int i40e_get_partition_bw_setting(struct i40e_pf *pf)
12509 bool min_valid, max_valid;
12510 u32 max_bw, min_bw;
12513 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
12514 &min_valid, &max_valid);
12518 pf->min_bw = min_bw;
12520 pf->max_bw = max_bw;
12527 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
12528 * @pf: board private structure
12530 int i40e_set_partition_bw_setting(struct i40e_pf *pf)
12532 struct i40e_aqc_configure_partition_bw_data bw_data;
12535 memset(&bw_data, 0, sizeof(bw_data));
12537 /* Set the valid bit for this PF */
12538 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
12539 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
12540 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
12542 /* Set the new bandwidths */
12543 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
12549 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
12550 * @pf: board private structure
12552 int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
12554 /* Commit temporary BW setting to permanent NVM image */
12555 enum i40e_admin_queue_err last_aq_status;
12559 if (pf->hw.partition_id != 1) {
12560 dev_info(&pf->pdev->dev,
12561 "Commit BW only works on partition 1! This is partition %d",
12562 pf->hw.partition_id);
12564 goto bw_commit_out;
12567 /* Acquire NVM for read access */
12568 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
12569 last_aq_status = pf->hw.aq.asq_last_status;
12571 dev_info(&pf->pdev->dev,
12572 "Cannot acquire NVM for read access, err %pe aq_err %s\n",
12574 i40e_aq_str(&pf->hw, last_aq_status));
12575 goto bw_commit_out;
12578 /* Read word 0x10 of NVM - SW compatibility word 1 */
12579 ret = i40e_aq_read_nvm(&pf->hw,
12580 I40E_SR_NVM_CONTROL_WORD,
12581 0x10, sizeof(nvm_word), &nvm_word,
12583 /* Save off last admin queue command status before releasing
12586 last_aq_status = pf->hw.aq.asq_last_status;
12587 i40e_release_nvm(&pf->hw);
12589 dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n",
12591 i40e_aq_str(&pf->hw, last_aq_status));
12592 goto bw_commit_out;
12595 /* Wait a bit for NVM release to complete */
12598 /* Acquire NVM for write access */
12599 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
12600 last_aq_status = pf->hw.aq.asq_last_status;
12602 dev_info(&pf->pdev->dev,
12603 "Cannot acquire NVM for write access, err %pe aq_err %s\n",
12605 i40e_aq_str(&pf->hw, last_aq_status));
12606 goto bw_commit_out;
12608 /* Write it back out unchanged to initiate update NVM,
12609 * which will force a write of the shadow (alt) RAM to
12610 * the NVM - thus storing the bandwidth values permanently.
12612 ret = i40e_aq_update_nvm(&pf->hw,
12613 I40E_SR_NVM_CONTROL_WORD,
12614 0x10, sizeof(nvm_word),
12615 &nvm_word, true, 0, NULL);
12616 /* Save off last admin queue command status before releasing
12619 last_aq_status = pf->hw.aq.asq_last_status;
12620 i40e_release_nvm(&pf->hw);
12622 dev_info(&pf->pdev->dev,
12623 "BW settings NOT SAVED, err %pe aq_err %s\n",
12625 i40e_aq_str(&pf->hw, last_aq_status));
12632 * i40e_is_total_port_shutdown_enabled - read NVM and return value
12633 * if total port shutdown feature is enabled for this PF
12634 * @pf: board private structure
12636 static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
12638 #define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4)
12639 #define I40E_FEATURES_ENABLE_PTR 0x2A
12640 #define I40E_CURRENT_SETTING_PTR 0x2B
12641 #define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D
12642 #define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
12643 #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
12644 #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
12645 u16 sr_emp_sr_settings_ptr = 0;
12646 u16 features_enable = 0;
12647 u16 link_behavior = 0;
12648 int read_status = 0;
12651 read_status = i40e_read_nvm_word(&pf->hw,
12652 I40E_SR_EMP_SR_SETTINGS_PTR,
12653 &sr_emp_sr_settings_ptr);
12656 read_status = i40e_read_nvm_word(&pf->hw,
12657 sr_emp_sr_settings_ptr +
12658 I40E_FEATURES_ENABLE_PTR,
12662 if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
12663 read_status = i40e_read_nvm_module_data(&pf->hw,
12664 I40E_SR_EMP_SR_SETTINGS_PTR,
12665 I40E_CURRENT_SETTING_PTR,
12666 I40E_LINK_BEHAVIOR_WORD_OFFSET,
12667 I40E_LINK_BEHAVIOR_WORD_LENGTH,
12671 link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
12672 ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
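	/*
	 * Each port owns a I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH (4) bit slice
	 * of the link-behavior word; after shifting this port's slice down,
	 * bit 0 reports whether link behavior is OS-forced, which the caller
	 * treats as total-port-shutdown being enabled for this port.
	 */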
12677 dev_warn(&pf->pdev->dev,
12678 "total-port-shutdown feature is off due to read nvm error: %pe\n",
12679 ERR_PTR(read_status));
12684 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
12685 * @pf: board private structure to initialize
12687 * i40e_sw_init initializes the Adapter private data structure.
12688 * Fields are initialized based on PCI device information and
12689 * OS network device settings (MTU size).
12691 static int i40e_sw_init(struct i40e_pf *pf)
12697 /* Set default capability flags */
12698 bitmap_zero(pf->flags, I40E_PF_FLAGS_NBITS);
12699 set_bit(I40E_FLAG_MSI_ENA, pf->flags);
12700 set_bit(I40E_FLAG_MSIX_ENA, pf->flags);
12702 /* Set default ITR */
12703 pf->rx_itr_default = I40E_ITR_RX_DEF;
12704 pf->tx_itr_default = I40E_ITR_TX_DEF;
12706 /* Depending on PF configurations, it is possible that the RSS
12707 * maximum might end up larger than the available queues
12709 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
12710 pf->alloc_rss_size = 1;
12711 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
12712 pf->rss_size_max = min_t(int, pf->rss_size_max,
12713 pf->hw.func_caps.num_tx_qp);
12715 /* find the next higher power-of-2 of num cpus */
12716 pow = roundup_pow_of_two(num_online_cpus());
12717 pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
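	/*
	 * Net effect: rss_size_max = min(2^rss_table_entry_width, num_tx_qp,
	 * roundup_pow_of_two(num_online_cpus())); there is little point in
	 * hashing across more queues than there are CPUs to service them.
	 */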
12719 if (pf->hw.func_caps.rss) {
12720 set_bit(I40E_FLAG_RSS_ENA, pf->flags);
12721 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
12722 num_online_cpus());
12725 /* MFP mode enabled */
12726 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
12727 set_bit(I40E_FLAG_MFP_ENA, pf->flags);
12728 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
12729 if (i40e_get_partition_bw_setting(pf)) {
12730 dev_warn(&pf->pdev->dev,
12731 "Could not get partition bw settings\n");
12733 dev_info(&pf->pdev->dev,
12734 "Partition BW Min = %8.8x, Max = %8.8x\n",
12735 pf->min_bw, pf->max_bw);
12737 /* nudge the Tx scheduler */
12738 i40e_set_partition_bw_setting(pf);
12742 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
12743 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
12744 set_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
12745 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) &&
12746 pf->hw.num_partitions > 1)
12747 dev_info(&pf->pdev->dev,
12748 "Flow Director Sideband mode Disabled in MFP mode\n");
12750 set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
12751 pf->fdir_pf_filter_count =
12752 pf->hw.func_caps.fd_filters_guaranteed;
12753 pf->hw.fdir_shared_filter_count =
12754 pf->hw.func_caps.fd_filters_best_effort;
12757 /* Enable HW ATR eviction if possible */
12758 if (test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps))
12759 set_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags);
12761 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
12762 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
12763 set_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
12764 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
12767 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
12768 set_bit(I40E_FLAG_IWARP_ENA, pf->flags);
12769 /* IWARP needs one extra vector for CQP just like MISC.*/
12770 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
12772 /* Stopping FW LLDP engine is supported on XL710 and X722
12773 * starting from FW versions determined in i40e_init_adminq.
12774 * Stopping the FW LLDP engine is not supported on XL710
12775 * if NPAR is functioning so unset this hw flag in this case.
12777 if (pf->hw.mac.type == I40E_MAC_XL710 &&
12778 pf->hw.func_caps.npar_enable)
12779 clear_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps);
12781 #ifdef CONFIG_PCI_IOV
12782 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
12783 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
12784 set_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
12785 pf->num_req_vfs = min_t(int,
12786 pf->hw.func_caps.num_vfs,
12787 I40E_MAX_VF_COUNT);
12789 #endif /* CONFIG_PCI_IOV */
12790 pf->lan_veb = I40E_NO_VEB;
12791 pf->lan_vsi = I40E_NO_VSI;
12793 /* By default FW has this off for performance reasons */
12794 clear_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
12796 /* set up queue assignment tracking */
12797 size = sizeof(struct i40e_lump_tracking)
12798 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
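/* The queue pile is one allocation: an i40e_lump_tracking header followed
 * by a u16 tracking slot per Tx queue pair, which is what lets
 * i40e_get_lump()/i40e_put_lump() hand out and reclaim contiguous ranges
 * of queues by index later on.
 */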
12799 pf->qp_pile = kzalloc(size, GFP_KERNEL);
12800 if (!pf->qp_pile) {
12804 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
12806 pf->tx_timeout_recovery_level = 1;
12808 if (pf->hw.mac.type != I40E_MAC_X722 &&
12809 i40e_is_total_port_shutdown_enabled(pf)) {
12810 /* Link down on close must be on when total port shutdown
12811 * is enabled for a given port
12813 set_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
12814 set_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
12815 dev_info(&pf->pdev->dev,
12816 "total-port-shutdown was enabled, link-down-on-close is forced on\n");
12818 mutex_init(&pf->switch_mutex);
12825 * i40e_set_ntuple - set the ntuple feature flag and take action
12826 * @pf: board private structure to initialize
12827 * @features: the feature set that the stack is suggesting
12829 * Returns a bool to indicate whether a reset needs to happen.
12831 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
12833 bool need_reset = false;
12835 /* Check if Flow Director n-tuple support was enabled or disabled. If
12836 * the state changed, we need to reset.
12838 if (features & NETIF_F_NTUPLE) {
12839 /* Enable filters and mark for reset */
12840 if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
12842 /* enable FD_SB only if there is an MSI-X vector and no cloud filters exist */
12845 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12846 set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
12847 clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
12850 /* turn off filters, mark for reset and clear SW filter list */
12851 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
12853 i40e_fdir_filter_exit(pf);
12855 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
12856 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12857 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
12859 /* reset fd counters */
12860 pf->fd_add_err = 0;
12861 pf->fd_atr_cnt = 0;
12862 /* if ATR was auto disabled it can be re-enabled. */
12863 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12864 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
12865 (I40E_DEBUG_FD & pf->hw.debug_mask))
12866 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
12872 * i40e_clear_rss_lut - clear the rx hash lookup table
12873 * @vsi: the VSI being configured
12875 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12877 struct i40e_pf *pf = vsi->back;
12878 struct i40e_hw *hw = &pf->hw;
12879 u16 vf_id = vsi->vf_id;
12882 if (vsi->type == I40E_VSI_MAIN) {
12883 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12884 wr32(hw, I40E_PFQF_HLUT(i), 0);
12885 } else if (vsi->type == I40E_VSI_SRIOV) {
12886 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12887 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
12889 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12894 * i40e_set_loopback - turn on/off loopback mode on underlying PF
12896 * @ena: flag to indicate the on/off setting
12898 static int i40e_set_loopback(struct i40e_vsi *vsi, bool ena)
12900 bool if_running = netif_running(vsi->netdev) &&
12901 !test_and_set_bit(__I40E_VSI_DOWN, vsi->state);
12907 ret = i40e_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
12909 netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
12917 * i40e_set_features - set the netdev feature flags
12918 * @netdev: ptr to the netdev being adjusted
12919 * @features: the feature set that the stack is suggesting
12920 * Note: expects to be called while under rtnl_lock()
12922 static int i40e_set_features(struct net_device *netdev,
12923 netdev_features_t features)
12925 struct i40e_netdev_priv *np = netdev_priv(netdev);
12926 struct i40e_vsi *vsi = np->vsi;
12927 struct i40e_pf *pf = vsi->back;
12930 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12931 i40e_pf_config_rss(pf);
12932 else if (!(features & NETIF_F_RXHASH) &&
12933 netdev->features & NETIF_F_RXHASH)
12934 i40e_clear_rss_lut(vsi);
12936 if (features & NETIF_F_HW_VLAN_CTAG_RX)
12937 i40e_vlan_stripping_enable(vsi);
12939 i40e_vlan_stripping_disable(vsi);
12941 if (!(features & NETIF_F_HW_TC) &&
12942 (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
12943 dev_err(&pf->pdev->dev,
12944 "Offloaded tc filters active, can't turn hw_tc_offload off");
12948 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12949 i40e_del_all_macvlans(vsi);
12951 need_reset = i40e_set_ntuple(pf, features);
12954 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12956 if ((features ^ netdev->features) & NETIF_F_LOOPBACK)
12957 return i40e_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
12962 static int i40e_udp_tunnel_set_port(struct net_device *netdev,
12963 unsigned int table, unsigned int idx,
12964 struct udp_tunnel_info *ti)
12966 struct i40e_netdev_priv *np = netdev_priv(netdev);
12967 struct i40e_hw *hw = &np->vsi->back->hw;
12968 u8 type, filter_index;
12971 type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
12972 I40E_AQC_TUNNEL_TYPE_NGE;
12974 ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
12977 netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n",
12979 i40e_aq_str(hw, hw->aq.asq_last_status));
12983 udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
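/* Stash the admin-queue filter index as this tunnel entry's private data;
 * the unset callback below gets it back as ti->hw_priv when the port is
 * removed.
 */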
12987 static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
12988 unsigned int table, unsigned int idx,
12989 struct udp_tunnel_info *ti)
12991 struct i40e_netdev_priv *np = netdev_priv(netdev);
12992 struct i40e_hw *hw = &np->vsi->back->hw;
12995 ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
12997 netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n",
12999 i40e_aq_str(hw, hw->aq.asq_last_status));
13006 static int i40e_get_phys_port_id(struct net_device *netdev,
13007 struct netdev_phys_item_id *ppid)
13009 struct i40e_netdev_priv *np = netdev_priv(netdev);
13010 struct i40e_pf *pf = np->vsi->back;
13011 struct i40e_hw *hw = &pf->hw;
13013 if (!test_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps))
13014 return -EOPNOTSUPP;
13016 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
13017 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
13023 * i40e_ndo_fdb_add - add an entry to the hardware database
13024 * @ndm: the input from the stack
13025 * @tb: pointer to array of nladdr (unused)
13026 * @dev: the net device pointer
13027 * @addr: the MAC address entry being added
13029 * @flags: instructions from stack about fdb operation
13030 * @extack: netlink extended ack, unused currently
13032 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
13033 struct net_device *dev,
13034 const unsigned char *addr, u16 vid,
13036 struct netlink_ext_ack *extack)
13038 struct i40e_netdev_priv *np = netdev_priv(dev);
13039 struct i40e_pf *pf = np->vsi->back;
13042 if (!test_bit(I40E_FLAG_SRIOV_ENA, pf->flags))
13043 return -EOPNOTSUPP;
13046 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
13050 /* Hardware does not support aging addresses, so if an
13051 * ndm_state is given, only allow permanent addresses
13053 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
13054 netdev_info(dev, "FDB only supports static addresses\n");
13058 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
13059 err = dev_uc_add_excl(dev, addr);
13060 else if (is_multicast_ether_addr(addr))
13061 err = dev_mc_add_excl(dev, addr);
13065 /* Only return duplicate errors if NLM_F_EXCL is set */
13066 if (err == -EEXIST && !(flags & NLM_F_EXCL))
13073 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
13074 * @dev: the netdev being configured
13075 * @nlh: RTNL message
13076 * @flags: bridge flags
13077 * @extack: netlink extended ack
13079 * Inserts a new hardware bridge if not already created and
13080 * enables the bridging mode requested (VEB or VEPA). If the
13081 * hardware bridge has already been inserted and the request
13082 * is to change the mode, then a PF reset is required so the
13083 * switch components can be rebuilt with the requested hardware
13084 * bridge mode enabled.
13086 * Note: expects to be called while under rtnl_lock()
13088 static int i40e_ndo_bridge_setlink(struct net_device *dev,
13089 struct nlmsghdr *nlh,
13091 struct netlink_ext_ack *extack)
13093 struct i40e_netdev_priv *np = netdev_priv(dev);
13094 struct i40e_vsi *vsi = np->vsi;
13095 struct i40e_pf *pf = vsi->back;
13096 struct nlattr *attr, *br_spec;
13097 struct i40e_veb *veb;
13100 /* Only for PF VSI for now */
13101 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
13102 return -EOPNOTSUPP;
13104 /* Find the HW bridge for PF VSI */
13105 veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
13107 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
13111 nla_for_each_nested(attr, br_spec, rem) {
13114 if (nla_type(attr) != IFLA_BRIDGE_MODE)
13117 mode = nla_get_u16(attr);
13118 if ((mode != BRIDGE_MODE_VEPA) &&
13119 (mode != BRIDGE_MODE_VEB))
13122 /* Insert a new HW bridge */
13124 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
13125 vsi->tc_config.enabled_tc);
13127 veb->bridge_mode = mode;
13128 i40e_config_bridge_mode(veb);
13130 /* No Bridge HW offload available */
13134 } else if (mode != veb->bridge_mode) {
13135 /* Existing HW bridge but different mode needs reset */
13136 veb->bridge_mode = mode;
13137 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
13138 if (mode == BRIDGE_MODE_VEB)
13139 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
13141 clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
13142 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
13151 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
13154 * @seq: RTNL message seq #
13155 * @dev: the netdev being configured
13156 * @filter_mask: unused
13157 * @nlflags: netlink flags passed in
13159 * Return the mode in which the hardware bridge is operating
13162 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
13163 struct net_device *dev,
13164 u32 __always_unused filter_mask,
13167 struct i40e_netdev_priv *np = netdev_priv(dev);
13168 struct i40e_vsi *vsi = np->vsi;
13169 struct i40e_pf *pf = vsi->back;
13170 struct i40e_veb *veb;
13172 /* Only for PF VSI for now */
13173 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
13174 return -EOPNOTSUPP;
13176 /* Find the HW bridge for the PF VSI */
13177 veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
13181 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
13182 0, 0, nlflags, filter_mask, NULL);
13186 * i40e_features_check - Validate encapsulated packet conforms to limits
13188 * @dev: This physical port's netdev
13189 * @features: Offload features that the stack believes apply
13191 static netdev_features_t i40e_features_check(struct sk_buff *skb,
13192 struct net_device *dev,
13193 netdev_features_t features)
13197 /* No point in doing any of this if neither checksum nor GSO are
13198 * being requested for this frame. We can rule out both by just
13199 * checking for CHECKSUM_PARTIAL
13201 if (skb->ip_summed != CHECKSUM_PARTIAL)
13204 /* We cannot support GSO if the MSS is going to be less than
13205 * 64 bytes. If it is then we need to drop support for GSO.
13207 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
13208 features &= ~NETIF_F_GSO_MASK;
13210 /* MACLEN can support at most 63 words */
13211 len = skb_network_offset(skb);
13212 if (len & ~(63 * 2))
13215 /* IPLEN and EIPLEN can support at most 127 dwords */
13216 len = skb_network_header_len(skb);
13217 if (len & ~(127 * 4))
13220 if (skb->encapsulation) {
13221 /* L4TUNLEN can support 127 words */
13222 len = skb_inner_network_header(skb) - skb_transport_header(skb);
13223 if (len & ~(127 * 2))
13226 /* IPLEN can support at most 127 dwords */
13227 len = skb_inner_transport_header(skb) -
13228 skb_inner_network_header(skb);
13229 if (len & ~(127 * 4))
13233 /* No need to validate L4LEN as TCP is the only protocol with a
13234 * flexible value and we support all possible values supported
13235 * by TCP, which is at most 15 dwords
13240 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
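/* A note on the length checks above: the descriptor fields count the MAC
 * header in 2-byte words and the IP headers in 4-byte dwords, so
 * "len & ~(63 * 2)" is non-zero whenever the network offset is odd or
 * larger than 126 bytes, and "len & ~(127 * 4)" rejects header lengths
 * that are not a multiple of 4 or exceed 508 bytes. Frames failing any of
 * these checks end up at this return with checksum and GSO offload cleared.
 */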
13244 * i40e_xdp_setup - add/remove an XDP program
13245 * @vsi: VSI to change
13246 * @prog: XDP program
13247 * @extack: netlink extended ack
13249 static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
13250 struct netlink_ext_ack *extack)
13252 int frame_size = i40e_max_vsi_frame_size(vsi, prog);
13253 struct i40e_pf *pf = vsi->back;
13254 struct bpf_prog *old_prog;
13258 /* Don't allow frames that span over multiple buffers */
13259 if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) {
13260 NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags");
13264 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
13265 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
13268 i40e_prep_for_reset(pf);
13270 /* VSI shall be deleted in a moment, just return EINVAL */
13271 if (test_bit(__I40E_IN_REMOVE, pf->state))
13274 old_prog = xchg(&vsi->xdp_prog, prog);
13278 xdp_features_clear_redirect_target(vsi->netdev);
13279 /* Wait until ndo_xsk_wakeup completes. */
13282 i40e_reset_and_rebuild(pf, true, true);
13285 if (!i40e_enabled_xdp_vsi(vsi) && prog) {
13286 if (i40e_realloc_rx_bi_zc(vsi, true))
13288 } else if (i40e_enabled_xdp_vsi(vsi) && !prog) {
13289 if (i40e_realloc_rx_bi_zc(vsi, false))
13293 for (i = 0; i < vsi->num_queue_pairs; i++)
13294 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
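/* Each Rx ring caches its own xdp_prog pointer; WRITE_ONCE publishes the
 * new program so the Rx path (which is expected to read it with READ_ONCE)
 * can pick it up without additional locking.
 */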
13297 bpf_prog_put(old_prog);
13299 /* Kick start the NAPI context if there is an AF_XDP socket open
13300 * on that queue id. This is so that receiving will start.
13302 if (need_reset && prog) {
13303 for (i = 0; i < vsi->num_queue_pairs; i++)
13304 if (vsi->xdp_rings[i]->xsk_pool)
13305 (void)i40e_xsk_wakeup(vsi->netdev, i,
13307 xdp_features_set_redirect_target(vsi->netdev, true);
13314 * i40e_enter_busy_conf - Enters busy config state
13317 * Returns 0 on success, <0 for failure.
13319 static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
13321 struct i40e_pf *pf = vsi->back;
13324 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
13328 usleep_range(1000, 2000);
13335 * i40e_exit_busy_conf - Exits busy config state
13338 static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
13340 struct i40e_pf *pf = vsi->back;
13342 clear_bit(__I40E_CONFIG_BUSY, pf->state);
13346 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
13348 * @queue_pair: queue pair
13350 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
13352 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
13353 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
13354 memset(&vsi->tx_rings[queue_pair]->stats, 0,
13355 sizeof(vsi->tx_rings[queue_pair]->stats));
13356 if (i40e_enabled_xdp_vsi(vsi)) {
13357 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
13358 sizeof(vsi->xdp_rings[queue_pair]->stats));
13363 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
13365 * @queue_pair: queue pair
13367 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
13369 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
13370 if (i40e_enabled_xdp_vsi(vsi)) {
13371 /* Make sure that in-progress ndo_xdp_xmit calls are
13375 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
13377 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
13381 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
13383 * @queue_pair: queue pair
13384 * @enable: true for enable, false for disable
13386 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
13389 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13390 struct i40e_q_vector *q_vector = rxr->q_vector;
13395 /* All rings in a qp belong to the same qvector. */
13396 if (q_vector->rx.ring || q_vector->tx.ring) {
13398 napi_enable(&q_vector->napi);
13400 napi_disable(&q_vector->napi);
13405 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
13407 * @queue_pair: queue pair
13408 * @enable: true for enable, false for disable
13410 * Returns 0 on success, <0 on failure.
13412 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
13415 struct i40e_pf *pf = vsi->back;
13418 pf_q = vsi->base_queue + queue_pair;
13419 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
13420 false /*is xdp*/, enable);
13422 dev_info(&pf->pdev->dev,
13423 "VSI seid %d Tx ring %d %sable timeout\n",
13424 vsi->seid, pf_q, (enable ? "en" : "dis"));
13428 i40e_control_rx_q(pf, pf_q, enable);
13429 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
13431 dev_info(&pf->pdev->dev,
13432 "VSI seid %d Rx ring %d %sable timeout\n",
13433 vsi->seid, pf_q, (enable ? "en" : "dis"));
13437 /* Due to HW errata, on Rx disable only, the register can
13438 * indicate done before it really is. Wait 50 ms to be sure.
13443 if (!i40e_enabled_xdp_vsi(vsi))
13446 ret = i40e_control_wait_tx_q(vsi->seid, pf,
13447 pf_q + vsi->alloc_queue_pairs,
13448 true /*is xdp*/, enable);
13450 dev_info(&pf->pdev->dev,
13451 "VSI seid %d XDP Tx ring %d %sable timeout\n",
13452 vsi->seid, pf_q, (enable ? "en" : "dis"));
13459 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
13461 * @queue_pair: queue_pair
13463 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
13465 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13466 struct i40e_pf *pf = vsi->back;
13467 struct i40e_hw *hw = &pf->hw;
13469 /* All rings in a qp belong to the same qvector. */
13470 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
13471 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
13473 i40e_irq_dynamic_enable_icr0(pf);
13479 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
13481 * @queue_pair: queue_pair
13483 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
13485 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
13486 struct i40e_pf *pf = vsi->back;
13487 struct i40e_hw *hw = &pf->hw;
13489 /* For simplicity, instead of removing the qp interrupt causes
13490 * from the interrupt linked list, we simply disable the interrupt, and
13491 * leave the list intact.
13493 * All rings in a qp belong to the same qvector.
13495 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
13496 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
13498 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
13500 synchronize_irq(pf->msix_entries[intpf].vector);
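/* MSI-X vector 0 is reserved for the misc/admin-queue interrupt, so queue
 * vectors are numbered from 1 and the DYN_CTLN register array is indexed
 * with "intpf - 1". synchronize_irq() then waits for any in-flight handler
 * on that vector before the queue pair is torn down.
 */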
13502 /* Legacy and MSI mode - this stops all interrupt handling */
13503 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
13504 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
13506 synchronize_irq(pf->pdev->irq);
13511 * i40e_queue_pair_disable - Disables a queue pair
13513 * @queue_pair: queue pair
13515 * Returns 0 on success, <0 on failure.
13517 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
13521 err = i40e_enter_busy_conf(vsi);
13525 i40e_queue_pair_disable_irq(vsi, queue_pair);
13526 i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
13527 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
13528 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
13529 i40e_queue_pair_clean_rings(vsi, queue_pair);
13530 i40e_queue_pair_reset_stats(vsi, queue_pair);
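/* Teardown order: mask the interrupt first, stop NAPI, disable the Tx/Rx
 * (and XDP Tx) rings in HW, and only then drain the rings and clear their
 * statistics.
 */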
13536 * i40e_queue_pair_enable - Enables a queue pair
13538 * @queue_pair: queue pair
13540 * Returns 0 on success, <0 on failure.
13542 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
13546 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
13550 if (i40e_enabled_xdp_vsi(vsi)) {
13551 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
13556 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
13560 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
13561 i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
13562 i40e_queue_pair_enable_irq(vsi, queue_pair);
13564 i40e_exit_busy_conf(vsi);
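/* Enable mirrors the disable path in reverse: the rings are configured and
 * started in HW first, then NAPI is re-enabled and the queue-pair interrupt
 * unmasked before leaving the busy-config state.
 */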
13570 * i40e_xdp - implements ndo_bpf for i40e
13572 * @xdp: XDP command
13574 static int i40e_xdp(struct net_device *dev,
13575 struct netdev_bpf *xdp)
13577 struct i40e_netdev_priv *np = netdev_priv(dev);
13578 struct i40e_vsi *vsi = np->vsi;
13580 if (vsi->type != I40E_VSI_MAIN)
13583 switch (xdp->command) {
13584 case XDP_SETUP_PROG:
13585 return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
13586 case XDP_SETUP_XSK_POOL:
13587 return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
13588 xdp->xsk.queue_id);
13594 static const struct net_device_ops i40e_netdev_ops = {
13595 .ndo_open = i40e_open,
13596 .ndo_stop = i40e_close,
13597 .ndo_start_xmit = i40e_lan_xmit_frame,
13598 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
13599 .ndo_set_rx_mode = i40e_set_rx_mode,
13600 .ndo_validate_addr = eth_validate_addr,
13601 .ndo_set_mac_address = i40e_set_mac,
13602 .ndo_change_mtu = i40e_change_mtu,
13603 .ndo_eth_ioctl = i40e_ioctl,
13604 .ndo_tx_timeout = i40e_tx_timeout,
13605 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
13606 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
13607 #ifdef CONFIG_NET_POLL_CONTROLLER
13608 .ndo_poll_controller = i40e_netpoll,
13610 .ndo_setup_tc = __i40e_setup_tc,
13611 .ndo_select_queue = i40e_lan_select_queue,
13612 .ndo_set_features = i40e_set_features,
13613 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
13614 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
13615 .ndo_get_vf_stats = i40e_get_vf_stats,
13616 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
13617 .ndo_get_vf_config = i40e_ndo_get_vf_config,
13618 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
13619 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
13620 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
13621 .ndo_get_phys_port_id = i40e_get_phys_port_id,
13622 .ndo_fdb_add = i40e_ndo_fdb_add,
13623 .ndo_features_check = i40e_features_check,
13624 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
13625 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
13626 .ndo_bpf = i40e_xdp,
13627 .ndo_xdp_xmit = i40e_xdp_xmit,
13628 .ndo_xsk_wakeup = i40e_xsk_wakeup,
13629 .ndo_dfwd_add_station = i40e_fwd_add,
13630 .ndo_dfwd_del_station = i40e_fwd_del,
13634 * i40e_config_netdev - Setup the netdev flags
13635 * @vsi: the VSI being configured
13637 * Returns 0 on success, negative value on failure
13639 static int i40e_config_netdev(struct i40e_vsi *vsi)
13641 struct i40e_pf *pf = vsi->back;
13642 struct i40e_hw *hw = &pf->hw;
13643 struct i40e_netdev_priv *np;
13644 struct net_device *netdev;
13645 u8 broadcast[ETH_ALEN];
13646 u8 mac_addr[ETH_ALEN];
13648 netdev_features_t hw_enc_features;
13649 netdev_features_t hw_features;
13651 etherdev_size = sizeof(struct i40e_netdev_priv);
13652 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
13656 vsi->netdev = netdev;
13657 np = netdev_priv(netdev);
13660 hw_enc_features = NETIF_F_SG |
13663 NETIF_F_SOFT_FEATURES |
13668 NETIF_F_GSO_GRE_CSUM |
13669 NETIF_F_GSO_PARTIAL |
13670 NETIF_F_GSO_IPXIP4 |
13671 NETIF_F_GSO_IPXIP6 |
13672 NETIF_F_GSO_UDP_TUNNEL |
13673 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13674 NETIF_F_GSO_UDP_L4 |
13680 if (!test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps))
13681 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
13683 netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
13685 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
13687 netdev->hw_enc_features |= hw_enc_features;
13689 /* record features VLANs can make use of */
13690 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
13692 #define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
13693 NETIF_F_GSO_GRE_CSUM | \
13694 NETIF_F_GSO_IPXIP4 | \
13695 NETIF_F_GSO_IPXIP6 | \
13696 NETIF_F_GSO_UDP_TUNNEL | \
13697 NETIF_F_GSO_UDP_TUNNEL_CSUM)
13699 netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
13700 netdev->features |= NETIF_F_GSO_PARTIAL |
13701 I40E_GSO_PARTIAL_FEATURES;
13703 netdev->mpls_features |= NETIF_F_SG;
13704 netdev->mpls_features |= NETIF_F_HW_CSUM;
13705 netdev->mpls_features |= NETIF_F_TSO;
13706 netdev->mpls_features |= NETIF_F_TSO6;
13707 netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;
13709 /* enable macvlan offloads */
13710 netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
13712 hw_features = hw_enc_features |
13713 NETIF_F_HW_VLAN_CTAG_TX |
13714 NETIF_F_HW_VLAN_CTAG_RX;
13716 if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
13717 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
13719 netdev->hw_features |= hw_features | NETIF_F_LOOPBACK;
13721 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
13722 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
13724 netdev->features &= ~NETIF_F_HW_TC;
13726 if (vsi->type == I40E_VSI_MAIN) {
13727 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
13728 ether_addr_copy(mac_addr, hw->mac.perm_addr);
13729 /* The following steps are necessary for two reasons. First,
13730 * some older NVM configurations load a default MAC-VLAN
13731 * filter that will accept any tagged packet, and we want to
13732 * replace this with a normal filter. Additionally, it is
13733 * possible our MAC address was provided by the platform using
13734 * Open Firmware or similar.
13736 * Thus, we need to remove the default filter and install one
13737 * specific to the MAC address.
13739 i40e_rm_default_mac_filter(vsi, mac_addr);
13740 spin_lock_bh(&vsi->mac_filter_hash_lock);
13741 i40e_add_mac_filter(vsi, mac_addr);
13742 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13744 netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
13745 NETDEV_XDP_ACT_REDIRECT |
13746 NETDEV_XDP_ACT_XSK_ZEROCOPY |
13747 NETDEV_XDP_ACT_RX_SG;
13748 netdev->xdp_zc_max_segs = I40E_MAX_BUFFER_TXD;
13750 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
13751 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
13752 * the end, which is 4 bytes long, so force truncation of the
13753 * original name by IFNAMSIZ - 4
13755 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
13757 pf->vsi[pf->lan_vsi]->netdev->name);
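/* e.g. if the main netdev is "eth2" this yields the template "eth2v%d",
 * which the core expands to eth2v0, eth2v1, ... as each VMDq netdev is
 * registered.
 */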
13758 eth_random_addr(mac_addr);
13760 spin_lock_bh(&vsi->mac_filter_hash_lock);
13761 i40e_add_mac_filter(vsi, mac_addr);
13762 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13765 /* Add the broadcast filter so that we initially will receive
13766 * broadcast packets. Note that when a new VLAN is first added the
13767 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
13768 * specific filters as part of transitioning into "vlan" operation.
13769 * When more VLANs are added, the driver will copy each existing MAC
13770 * filter and add it for the new VLAN.
13772 * Broadcast filters are handled specially by
13773 * i40e_sync_filters_subtask, as the driver must set the broadcast
13774 * promiscuous bit instead of adding this directly as a MAC/VLAN
13775 * filter. The subtask will update the correct broadcast promiscuous
13776 * bits as VLANs become active or inactive.
13778 eth_broadcast_addr(broadcast);
13779 spin_lock_bh(&vsi->mac_filter_hash_lock);
13780 i40e_add_mac_filter(vsi, broadcast);
13781 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13783 eth_hw_addr_set(netdev, mac_addr);
13784 ether_addr_copy(netdev->perm_addr, mac_addr);
13786 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
13787 netdev->neigh_priv_len = sizeof(u32) * 4;
13789 netdev->priv_flags |= IFF_UNICAST_FLT;
13790 netdev->priv_flags |= IFF_SUPP_NOFCS;
13791 /* Setup netdev TC information */
13792 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
13794 netdev->netdev_ops = &i40e_netdev_ops;
13795 netdev->watchdog_timeo = 5 * HZ;
13796 i40e_set_ethtool_ops(netdev);
13798 /* MTU range: 68 - 9706 */
13799 netdev->min_mtu = ETH_MIN_MTU;
13800 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
13806 * i40e_vsi_delete - Delete a VSI from the switch
13807 * @vsi: the VSI being removed
13811 static void i40e_vsi_delete(struct i40e_vsi *vsi)
13813 /* remove default VSI is not allowed */
13814 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
13817 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
13821 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
13822 * @vsi: the VSI being queried
13824 * Returns 1 if HW bridge mode is VEB and 0 in case of VEPA mode
13826 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
13828 struct i40e_veb *veb;
13829 struct i40e_pf *pf = vsi->back;
13831 /* Uplink is not a bridge so default to VEB */
13832 if (vsi->veb_idx >= I40E_MAX_VEB)
13835 veb = pf->veb[vsi->veb_idx];
13837 dev_info(&pf->pdev->dev,
13838 "There is no veb associated with the bridge\n");
13842 /* Uplink is a bridge in VEPA mode */
13843 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
13846 /* Uplink is a bridge in VEB mode */
13850 /* VEPA is now default bridge, so return 0 */
13855 * i40e_add_vsi - Add a VSI to the switch
13856 * @vsi: the VSI being configured
13858 * This initializes a VSI context depending on the VSI type to be added and
13859 * passes it down to the add_vsi aq command.
13861 static int i40e_add_vsi(struct i40e_vsi *vsi)
13864 struct i40e_pf *pf = vsi->back;
13865 struct i40e_hw *hw = &pf->hw;
13866 struct i40e_vsi_context ctxt;
13867 struct i40e_mac_filter *f;
13868 struct hlist_node *h;
13871 u8 enabled_tc = 0x1; /* TC0 enabled */
13874 memset(&ctxt, 0, sizeof(ctxt));
13875 switch (vsi->type) {
13876 case I40E_VSI_MAIN:
13877 /* The PF's main VSI is already setup as part of the
13878 * device initialization, so we'll not bother with
13879 * the add_vsi call, but we will retrieve the current VSI context.
13882 ctxt.seid = pf->main_vsi_seid;
13883 ctxt.pf_num = pf->hw.pf_id;
13885 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
13886 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13888 dev_info(&pf->pdev->dev,
13889 "couldn't get PF vsi config, err %pe aq_err %s\n",
13891 i40e_aq_str(&pf->hw,
13892 pf->hw.aq.asq_last_status));
13895 vsi->info = ctxt.info;
13896 vsi->info.valid_sections = 0;
13898 vsi->seid = ctxt.seid;
13899 vsi->id = ctxt.vsi_number;
13901 enabled_tc = i40e_pf_get_tc_map(pf);
13903 /* Source pruning is enabled by default, so the flag is
13904 * negative logic - if it's set, we need to fiddle with
13905 * the VSI to disable source pruning.
13907 if (test_bit(I40E_FLAG_SOURCE_PRUNING_DIS, pf->flags)) {
13908 memset(&ctxt, 0, sizeof(ctxt));
13909 ctxt.seid = pf->main_vsi_seid;
13910 ctxt.pf_num = pf->hw.pf_id;
13912 ctxt.info.valid_sections |=
13913 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13914 ctxt.info.switch_id =
13915 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
13916 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13918 dev_info(&pf->pdev->dev,
13919 "update vsi failed, err %d aq_err %s\n",
13921 i40e_aq_str(&pf->hw,
13922 pf->hw.aq.asq_last_status));
13928 /* MFP mode setup queue map and update VSI */
13929 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) &&
13930 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
13931 memset(&ctxt, 0, sizeof(ctxt));
13932 ctxt.seid = pf->main_vsi_seid;
13933 ctxt.pf_num = pf->hw.pf_id;
13935 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
13936 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13938 dev_info(&pf->pdev->dev,
13939 "update vsi failed, err %pe aq_err %s\n",
13941 i40e_aq_str(&pf->hw,
13942 pf->hw.aq.asq_last_status));
13946 /* update the local VSI info queue map */
13947 i40e_vsi_update_queue_map(vsi, &ctxt);
13948 vsi->info.valid_sections = 0;
13950 /* Default/Main VSI is only enabled for TC0
13951 * reconfigure it to enable all TCs that are
13952 * available on the port in SFP mode.
13953 * For MFP case the iSCSI PF would use this
13954 * flow to enable LAN+iSCSI TC.
13956 ret = i40e_vsi_config_tc(vsi, enabled_tc);
13958 /* Single TC condition is not fatal,
13959 * log a message and continue
13961 dev_info(&pf->pdev->dev,
13962 "failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n",
13965 i40e_aq_str(&pf->hw,
13966 pf->hw.aq.asq_last_status));
13971 case I40E_VSI_FDIR:
13972 ctxt.pf_num = hw->pf_id;
13974 ctxt.uplink_seid = vsi->uplink_seid;
13975 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13976 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13977 if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags) &&
13978 (i40e_is_vsi_uplink_mode_veb(vsi))) {
13979 ctxt.info.valid_sections |=
13980 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13981 ctxt.info.switch_id =
13982 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13984 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13987 case I40E_VSI_VMDQ2:
13988 ctxt.pf_num = hw->pf_id;
13990 ctxt.uplink_seid = vsi->uplink_seid;
13991 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13992 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
13994 /* This VSI is connected to VEB so the switch_id
13995 * should be set to zero by default.
13997 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13998 ctxt.info.valid_sections |=
13999 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
14000 ctxt.info.switch_id =
14001 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
14004 /* Setup the VSI tx/rx queue map for TC0 only for now */
14005 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
14008 case I40E_VSI_SRIOV:
14009 ctxt.pf_num = hw->pf_id;
14010 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
14011 ctxt.uplink_seid = vsi->uplink_seid;
14012 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
14013 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
14015 /* This VSI is connected to VEB so the switch_id
14016 * should be set to zero by default.
14018 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
14019 ctxt.info.valid_sections |=
14020 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
14021 ctxt.info.switch_id =
14022 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
14025 if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) {
14026 ctxt.info.valid_sections |=
14027 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
14028 ctxt.info.queueing_opt_flags |=
14029 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
14030 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
14033 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
14034 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
14035 if (pf->vf[vsi->vf_id].spoofchk) {
14036 ctxt.info.valid_sections |=
14037 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
14038 ctxt.info.sec_flags |=
14039 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
14040 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
14042 /* Setup the VSI tx/rx queue map for TC0 only for now */
14043 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
14046 case I40E_VSI_IWARP:
14047 /* send down message to iWARP */
14054 if (vsi->type != I40E_VSI_MAIN) {
14055 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
14057 dev_info(&vsi->back->pdev->dev,
14058 "add vsi failed, err %pe aq_err %s\n",
14060 i40e_aq_str(&pf->hw,
14061 pf->hw.aq.asq_last_status));
14065 vsi->info = ctxt.info;
14066 vsi->info.valid_sections = 0;
14067 vsi->seid = ctxt.seid;
14068 vsi->id = ctxt.vsi_number;
14071 spin_lock_bh(&vsi->mac_filter_hash_lock);
14072 vsi->active_filters = 0;
14073 /* If macvlan filters already exist, force them to get loaded */
14074 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
14075 f->state = I40E_FILTER_NEW;
14078 spin_unlock_bh(&vsi->mac_filter_hash_lock);
14079 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
14082 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
14083 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
14086 /* Update VSI BW information */
14087 ret = i40e_vsi_get_bw_info(vsi);
14089 dev_info(&pf->pdev->dev,
14090 "couldn't get vsi bw info, err %pe aq_err %s\n",
14092 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14093 /* VSI is already added so not tearing that up */
14102 * i40e_vsi_release - Delete a VSI and free its resources
14103 * @vsi: the VSI being removed
14105 * Returns 0 on success or < 0 on error
14107 int i40e_vsi_release(struct i40e_vsi *vsi)
14109 struct i40e_mac_filter *f;
14110 struct hlist_node *h;
14111 struct i40e_veb *veb;
14112 struct i40e_pf *pf;
14118 /* release of a VEB-owner or last VSI is not allowed */
14119 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
14120 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
14121 vsi->seid, vsi->uplink_seid);
14124 if (vsi == pf->vsi[pf->lan_vsi] &&
14125 !test_bit(__I40E_DOWN, pf->state)) {
14126 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
14129 set_bit(__I40E_VSI_RELEASING, vsi->state);
14130 uplink_seid = vsi->uplink_seid;
14132 if (vsi->type != I40E_VSI_SRIOV) {
14133 if (vsi->netdev_registered) {
14134 vsi->netdev_registered = false;
14136 /* results in a call to i40e_close() */
14137 unregister_netdev(vsi->netdev);
14140 i40e_vsi_close(vsi);
14142 i40e_vsi_disable_irq(vsi);
14145 if (vsi->type == I40E_VSI_MAIN)
14146 i40e_devlink_destroy_port(pf);
14148 spin_lock_bh(&vsi->mac_filter_hash_lock);
14150 /* clear the sync flag on all filters */
14152 __dev_uc_unsync(vsi->netdev, NULL);
14153 __dev_mc_unsync(vsi->netdev, NULL);
14156 /* make sure any remaining filters are marked for deletion */
14157 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
14158 __i40e_del_filter(vsi, f);
14160 spin_unlock_bh(&vsi->mac_filter_hash_lock);
14162 i40e_sync_vsi_filters(vsi);
14164 i40e_vsi_delete(vsi);
14165 i40e_vsi_free_q_vectors(vsi);
14167 free_netdev(vsi->netdev);
14168 vsi->netdev = NULL;
14170 i40e_vsi_clear_rings(vsi);
14171 i40e_vsi_clear(vsi);
14173 /* If this was the last thing on the VEB, except for the
14174 * controlling VSI, remove the VEB, which puts the controlling
14175 * VSI onto the uplink port.
14177 * Well, okay, there's one more exception here: don't remove
14178 * the floating VEBs yet. We'll wait for an explicit remove request
14179 * from up the network stack.
14181 veb = i40e_pf_get_veb_by_seid(pf, uplink_seid);
14182 if (veb && veb->uplink_seid) {
14185 /* Count non-controlling VSIs present on the VEB */
14186 i40e_pf_for_each_vsi(pf, i, vsi)
14187 if (vsi->uplink_seid == uplink_seid &&
14188 (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
14191 /* If there is no VSI except the control one then release
14192 * the VEB and put the control VSI onto VEB uplink.
14195 i40e_veb_release(veb);
14202 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
14203 * @vsi: ptr to the VSI
14205 * This should only be called after i40e_vsi_mem_alloc() which allocates the
14206 * corresponding SW VSI structure and initializes num_queue_pairs for the
14207 * newly allocated VSI.
14209 * Returns 0 on success or negative on failure
14211 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
14214 struct i40e_pf *pf = vsi->back;
14216 if (vsi->q_vectors[0]) {
14217 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
14222 if (vsi->base_vector) {
14223 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
14224 vsi->seid, vsi->base_vector);
14228 ret = i40e_vsi_alloc_q_vectors(vsi);
14230 dev_info(&pf->pdev->dev,
14231 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
14232 vsi->num_q_vectors, vsi->seid, ret);
14233 vsi->num_q_vectors = 0;
14234 goto vector_setup_out;
14237 /* In Legacy mode, we do not have to get any other vector since we
14238 * piggyback on the misc/ICR0 for queue interrupts.
14240 if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
14242 if (vsi->num_q_vectors)
14243 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
14244 vsi->num_q_vectors, vsi->idx);
14245 if (vsi->base_vector < 0) {
14246 dev_info(&pf->pdev->dev,
14247 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
14248 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
14249 i40e_vsi_free_q_vectors(vsi);
14251 goto vector_setup_out;
14259 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
14260 * @vsi: pointer to the vsi.
14262 * This re-allocates a vsi's queue resources.
14264 * Returns pointer to the successfully allocated and configured VSI sw struct
14265 * on success, otherwise returns NULL on failure.
14267 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
14269 u16 alloc_queue_pairs;
14270 struct i40e_pf *pf;
14279 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
14280 i40e_vsi_clear_rings(vsi);
14282 i40e_vsi_free_arrays(vsi, false);
14283 i40e_set_num_rings_in_vsi(vsi);
14284 ret = i40e_vsi_alloc_arrays(vsi, false);
14288 alloc_queue_pairs = vsi->alloc_queue_pairs *
14289 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
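/* When an XDP program is attached each queue pair also needs its own XDP
 * Tx ring, so twice as many queues are reserved from the pile.
 */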
14291 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14293 dev_info(&pf->pdev->dev,
14294 "failed to get tracking for %d queues for VSI %d err %d\n",
14295 alloc_queue_pairs, vsi->seid, ret);
14298 vsi->base_queue = ret;
14300 /* Update the FW view of the VSI. Force a reset of TC and queue
14301 * layout configurations.
14303 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14304 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14305 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14306 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14307 if (vsi->type == I40E_VSI_MAIN)
14308 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
14310 /* assign it some queues */
14311 ret = i40e_alloc_rings(vsi);
14315 /* map all of the rings to the q_vectors */
14316 i40e_vsi_map_rings_to_vectors(vsi);
14320 i40e_vsi_free_q_vectors(vsi);
14321 if (vsi->netdev_registered) {
14322 vsi->netdev_registered = false;
14323 unregister_netdev(vsi->netdev);
14324 free_netdev(vsi->netdev);
14325 vsi->netdev = NULL;
14327 if (vsi->type == I40E_VSI_MAIN)
14328 i40e_devlink_destroy_port(pf);
14329 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14331 i40e_vsi_clear(vsi);
14336 * i40e_vsi_setup - Set up a VSI by a given type
14337 * @pf: board private structure
14339 * @uplink_seid: the switch element to link to
14340 * @param1: usage depends upon VSI type. For VF types, indicates VF id
14342 * This allocates the sw VSI structure and its queue resources, then adds a VSI
14343 * to the identified VEB.
14345 * Returns pointer to the successfully allocated and configured VSI sw struct on
14346 * success, otherwise returns NULL on failure.
14348 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
14349 u16 uplink_seid, u32 param1)
14351 struct i40e_vsi *vsi = NULL;
14352 struct i40e_veb *veb = NULL;
14353 u16 alloc_queue_pairs;
14357 /* The requested uplink_seid must be either
14358 * - the PF's port seid
14359 * no VEB is needed because this is the PF
14360 * or this is a Flow Director special case VSI
14361 * - seid of an existing VEB
14362 * - seid of a VSI that owns an existing VEB
14363 * - seid of a VSI that doesn't own a VEB
14364 * a new VEB is created and the VSI becomes the owner
14365 * - seid of the PF VSI, which is what creates the first VEB
14366 * this is a special case of the previous
14368 * Find which uplink_seid we were given and create a new VEB if needed
14370 veb = i40e_pf_get_veb_by_seid(pf, uplink_seid);
14371 if (!veb && uplink_seid != pf->mac_seid) {
14372 vsi = i40e_pf_get_vsi_by_seid(pf, uplink_seid);
14374 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
14379 if (vsi->uplink_seid == pf->mac_seid)
14380 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
14381 vsi->tc_config.enabled_tc);
14382 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
14383 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
14384 vsi->tc_config.enabled_tc);
14386 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
14387 dev_info(&vsi->back->pdev->dev,
14388 "New VSI creation error, uplink seid of LAN VSI expected.\n");
14391 /* We come up by default in VEPA mode if SRIOV is not
14392 * already enabled, in which case we can't force VEPA mode.
14395 if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
14396 veb->bridge_mode = BRIDGE_MODE_VEPA;
14397 clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
14399 i40e_config_bridge_mode(veb);
14401 veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
14403 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
14407 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14408 uplink_seid = veb->seid;
14411 /* get vsi sw struct */
14412 v_idx = i40e_vsi_mem_alloc(pf, type);
14415 vsi = pf->vsi[v_idx];
14419 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
14421 if (type == I40E_VSI_MAIN)
14422 pf->lan_vsi = v_idx;
14423 else if (type == I40E_VSI_SRIOV)
14424 vsi->vf_id = param1;
14425 /* assign it some queues */
14426 alloc_queue_pairs = vsi->alloc_queue_pairs *
14427 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
14429 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
14431 dev_info(&pf->pdev->dev,
14432 "failed to get tracking for %d queues for VSI %d err=%d\n",
14433 alloc_queue_pairs, vsi->seid, ret);
14436 vsi->base_queue = ret;
14438 /* get a VSI from the hardware */
14439 vsi->uplink_seid = uplink_seid;
14440 ret = i40e_add_vsi(vsi);
14444 switch (vsi->type) {
14445 /* setup the netdev if needed */
14446 case I40E_VSI_MAIN:
14447 case I40E_VSI_VMDQ2:
14448 ret = i40e_config_netdev(vsi);
14451 ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
14454 if (vsi->type == I40E_VSI_MAIN) {
14455 ret = i40e_devlink_create_port(pf);
14458 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
14460 ret = register_netdev(vsi->netdev);
14463 vsi->netdev_registered = true;
14464 netif_carrier_off(vsi->netdev);
14465 #ifdef CONFIG_I40E_DCB
14466 /* Setup DCB netlink interface */
14467 i40e_dcbnl_setup(vsi);
14468 #endif /* CONFIG_I40E_DCB */
14470 case I40E_VSI_FDIR:
14471 /* set up vectors and rings if needed */
14472 ret = i40e_vsi_setup_vectors(vsi);
14476 ret = i40e_alloc_rings(vsi);
14480 /* map all of the rings to the q_vectors */
14481 i40e_vsi_map_rings_to_vectors(vsi);
14483 i40e_vsi_reset_stats(vsi);
14486 /* no netdev or rings for the other VSI types */
14490 if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) &&
14491 vsi->type == I40E_VSI_VMDQ2) {
14492 ret = i40e_vsi_config_rss(vsi);
14499 i40e_vsi_clear_rings(vsi);
14501 i40e_vsi_free_q_vectors(vsi);
14503 if (vsi->netdev_registered) {
14504 vsi->netdev_registered = false;
14505 unregister_netdev(vsi->netdev);
14506 free_netdev(vsi->netdev);
14507 vsi->netdev = NULL;
14510 if (vsi->type == I40E_VSI_MAIN)
14511 i40e_devlink_destroy_port(pf);
14513 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
14515 i40e_vsi_clear(vsi);
14521 * i40e_veb_get_bw_info - Query VEB BW information
14522 * @veb: the veb to query
14524 * Query the Tx scheduler BW configuration data for given VEB
14526 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
14528 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
14529 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
14530 struct i40e_pf *pf = veb->pf;
14531 struct i40e_hw *hw = &pf->hw;
14536 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
14539 dev_info(&pf->pdev->dev,
14540 "query veb bw config failed, err %pe aq_err %s\n",
14542 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14546 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
14549 dev_info(&pf->pdev->dev,
14550 "query veb bw ets config failed, err %pe aq_err %s\n",
14552 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
14556 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
14557 veb->bw_max_quanta = ets_data.tc_bw_max;
14558 veb->is_abs_credits = bw_data.absolute_credits_enable;
14559 veb->enabled_tc = ets_data.tc_valid_bits;
14560 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
14561 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
14562 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
14563 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
14564 veb->bw_tc_limit_credits[i] =
14565 le16_to_cpu(bw_data.tc_bw_limits[i]);
14566 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
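/* tc_bw_max arrives as two little-endian 16-bit words stitched into one
 * 32-bit value above, one 4-bit field per traffic class (8 TCs x 4 bits);
 * the low 3 bits of each field carry the max-quanta value for that TC.
 */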
14574 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
14575 * @pf: board private structure
14577 * On error: returns error code (negative)
14578 * On success: returns VEB index in PF (positive)
14580 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
14583 struct i40e_veb *veb;
14586 /* Need to protect the allocation of switch elements at the PF level */
14587 mutex_lock(&pf->switch_mutex);
14589 /* VEB list may be fragmented if VEB creation/destruction has
14590 * been happening. We can afford to do a quick scan to look
14591 * for any free slots in the list.
14593 * find next empty veb slot, looping back around if necessary
14596 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
14598 if (i >= I40E_MAX_VEB) {
14600 goto err_alloc_veb; /* out of VEB slots! */
14603 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
14606 goto err_alloc_veb;
14610 veb->enabled_tc = 1;
14615 mutex_unlock(&pf->switch_mutex);
14620 * i40e_switch_branch_release - Delete a branch of the switch tree
14621 * @branch: where to start deleting
14623 * This uses recursion to find the tips of the branch to be
14624 * removed, deleting until we get back to and can delete this VEB.
14626 static void i40e_switch_branch_release(struct i40e_veb *branch)
14628 struct i40e_pf *pf = branch->pf;
14629 u16 branch_seid = branch->seid;
14630 u16 veb_idx = branch->idx;
14631 struct i40e_vsi *vsi;
14632 struct i40e_veb *veb;
14635 /* release any VEBs on this VEB - RECURSION */
14636 i40e_pf_for_each_veb(pf, i, veb)
14637 if (veb->uplink_seid == branch->seid)
14638 i40e_switch_branch_release(veb);
14640 /* Release the VSIs on this VEB, but not the owner VSI.
14642 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
14643 * the VEB itself, so don't use (*branch) after this loop.
14645 i40e_pf_for_each_vsi(pf, i, vsi)
14646 if (vsi->uplink_seid == branch_seid &&
14647 (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
14648 i40e_vsi_release(vsi);
14650 /* There's one corner case where the VEB might not have been
14651 * removed, so double check it here and remove it if needed.
14652 * This case happens if the veb was created from the debugfs
14653 * commands and no VSIs were added to it.
14655 if (pf->veb[veb_idx])
14656 i40e_veb_release(pf->veb[veb_idx]);
14660 * i40e_veb_clear - remove veb struct
14661 * @veb: the veb to remove
14663 static void i40e_veb_clear(struct i40e_veb *veb)
14669 struct i40e_pf *pf = veb->pf;
14671 mutex_lock(&pf->switch_mutex);
14672 if (pf->veb[veb->idx] == veb)
14673 pf->veb[veb->idx] = NULL;
14674 mutex_unlock(&pf->switch_mutex);
14681 * i40e_veb_release - Delete a VEB and free its resources
14682 * @veb: the VEB being removed
14684 void i40e_veb_release(struct i40e_veb *veb)
14686 struct i40e_vsi *vsi, *vsi_it;
14687 struct i40e_pf *pf;
14692 /* find the remaining VSI and check for extras */
14693 i40e_pf_for_each_vsi(pf, i, vsi_it)
14694 if (vsi_it->uplink_seid == veb->seid) {
14695 if (vsi_it->flags & I40E_VSI_FLAG_VEB_OWNER)
14700 /* Floating VEB has to be empty and regular one must have
14701 * single owner VSI.
14703 if ((veb->uplink_seid && n != 1) || (!veb->uplink_seid && n != 0)) {
14704 dev_info(&pf->pdev->dev,
14705 "can't remove VEB %d with %d VSIs left\n",
14710 /* For regular VEB move the owner VSI to uplink port */
14711 if (veb->uplink_seid) {
14712 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
14713 vsi->uplink_seid = veb->uplink_seid;
14714 vsi->veb_idx = I40E_NO_VEB;
14717 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14718 i40e_veb_clear(veb);
14722 * i40e_add_veb - create the VEB in the switch
14723 * @veb: the VEB to be instantiated
14724 * @vsi: the controlling VSI
14726 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
14728 struct i40e_pf *pf = veb->pf;
14729 bool enable_stats = !!test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
14732 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi ? vsi->seid : 0,
14733 veb->enabled_tc, vsi ? false : true,
14734 &veb->seid, enable_stats, NULL);
14736 /* get a VEB from the hardware */
14738 dev_info(&pf->pdev->dev,
14739 "couldn't add VEB, err %pe aq_err %s\n",
14741 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14745 /* get statistics counter */
14746 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
14747 &veb->stats_idx, NULL, NULL, NULL);
14749 dev_info(&pf->pdev->dev,
14750 "couldn't get VEB statistics idx, err %pe aq_err %s\n",
14752 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14755 ret = i40e_veb_get_bw_info(veb);
14757 dev_info(&pf->pdev->dev,
14758 "couldn't get VEB bw info, err %pe aq_err %s\n",
14760 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14761 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
14766 vsi->uplink_seid = veb->seid;
14767 vsi->veb_idx = veb->idx;
14768 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
14775 * i40e_veb_setup - Set up a VEB
14776 * @pf: board private structure
14777 * @flags: VEB setup flags
14778 * @uplink_seid: the switch element to link to
14779 * @vsi_seid: the initial VSI seid
14780 * @enabled_tc: Enabled TC bit-map
14782 * This allocates the sw VEB structure and links it into the switch.
14783 * It is possible and legal for this to be a duplicate of an already
14784 * existing VEB. It is also possible for both uplink and vsi seids
14785 * to be zero, in order to create a floating VEB.
14787 * Returns pointer to the successfully allocated VEB sw struct on
14788 * success, otherwise returns NULL on failure.
14790 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
14791 u16 uplink_seid, u16 vsi_seid,
14794 struct i40e_vsi *vsi = NULL;
14795 struct i40e_veb *veb;
14799 /* if one seid is 0, the other must be 0 to create a floating relay */
14800 if ((uplink_seid == 0 || vsi_seid == 0) &&
14801 (uplink_seid + vsi_seid != 0)) {
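/* i.e. exactly one of the two SEIDs is zero, which can't describe a
 * floating relay */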
14802 dev_info(&pf->pdev->dev,
14803 "one, not both seid's are 0: uplink=%d vsi=%d\n",
14804 uplink_seid, vsi_seid);
14808 /* make sure there is such a vsi and uplink */
14810 vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
14812 dev_err(&pf->pdev->dev, "vsi seid %d not found\n",
14818 /* get veb sw struct */
14819 veb_idx = i40e_veb_mem_alloc(pf);
14822 veb = pf->veb[veb_idx];
14823 veb->flags = flags;
14824 veb->uplink_seid = uplink_seid;
14825 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
14827 /* create the VEB in the switch */
14828 ret = i40e_add_veb(veb, vsi);
14832 if (vsi && vsi->idx == pf->lan_vsi)
14833 pf->lan_veb = veb->idx;
14838 i40e_veb_clear(veb);
14844 * i40e_setup_pf_switch_element - set PF vars based on switch type
14845 * @pf: board private structure
14846 * @ele: element we are building info from
14847 * @num_reported: total number of elements
14848 * @printconfig: should we print the contents
14850 * helper function to assist in extracting a few useful SEID values.
14852 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
14853 struct i40e_aqc_switch_config_element_resp *ele,
14854 u16 num_reported, bool printconfig)
14856 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
14857 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
14858 u8 element_type = ele->element_type;
14859 u16 seid = le16_to_cpu(ele->seid);
14860 struct i40e_veb *veb;
14863 dev_info(&pf->pdev->dev,
14864 "type=%d seid=%d uplink=%d downlink=%d\n",
14865 element_type, seid, uplink_seid, downlink_seid);
14867 switch (element_type) {
14868 case I40E_SWITCH_ELEMENT_TYPE_MAC:
14869 pf->mac_seid = seid;
14871 case I40E_SWITCH_ELEMENT_TYPE_VEB:
14873 if (uplink_seid != pf->mac_seid)
14875 if (pf->lan_veb >= I40E_MAX_VEB) {
14878 /* find existing or else empty VEB */
14879 veb = i40e_pf_get_veb_by_seid(pf, seid);
14881 pf->lan_veb = veb->idx;
14883 v = i40e_veb_mem_alloc(pf);
14889 if (pf->lan_veb >= I40E_MAX_VEB)
14892 pf->veb[pf->lan_veb]->seid = seid;
14893 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
14894 pf->veb[pf->lan_veb]->pf = pf;
14896 case I40E_SWITCH_ELEMENT_TYPE_VSI:
14897 if (num_reported != 1)
14899 /* This is immediately after a reset so we can assume this is the PF's main VSI */
14902 pf->mac_seid = uplink_seid;
14903 pf->main_vsi_seid = seid;
14905 dev_info(&pf->pdev->dev,
14906 "pf_seid=%d main_vsi_seid=%d\n",
14907 downlink_seid, pf->main_vsi_seid);
14909 case I40E_SWITCH_ELEMENT_TYPE_PF:
14910 case I40E_SWITCH_ELEMENT_TYPE_VF:
14911 case I40E_SWITCH_ELEMENT_TYPE_EMP:
14912 case I40E_SWITCH_ELEMENT_TYPE_BMC:
14913 case I40E_SWITCH_ELEMENT_TYPE_PE:
14914 case I40E_SWITCH_ELEMENT_TYPE_PA:
14915 /* ignore these for now */
14918 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
14919 element_type, seid);
14925 * i40e_fetch_switch_configuration - Get switch config from firmware
14926 * @pf: board private structure
14927 * @printconfig: should we print the contents
14929 * Get the current switch configuration from the device and
14930 * extract a few useful SEID values.
14932 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
14934 struct i40e_aqc_get_switch_config_resp *sw_config;
14940 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
14944 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
14946 u16 num_reported, num_total;
14948 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
14952 dev_info(&pf->pdev->dev,
14953 "get switch config failed err %d aq_err %s\n",
14955 i40e_aq_str(&pf->hw,
14956 pf->hw.aq.asq_last_status));
14961 num_reported = le16_to_cpu(sw_config->header.num_reported);
14962 num_total = le16_to_cpu(sw_config->header.num_total);
14965 dev_info(&pf->pdev->dev,
14966 "header: %d reported %d total\n",
14967 num_reported, num_total);
14969 for (i = 0; i < num_reported; i++) {
14970 struct i40e_aqc_switch_config_element_resp *ele =
14971 &sw_config->element[i];
14973 i40e_setup_pf_switch_element(pf, ele, num_reported,
14976 } while (next_seid != 0);
14983 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
14984 * @pf: board private structure
14985 * @reinit: if the Main VSI needs to be re-initialized.
14986 * @lock_acquired: indicates whether or not the lock has been acquired
14988 * Returns 0 on success, negative value on failure
14990 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
14995 /* find out what's out there already */
14996 ret = i40e_fetch_switch_configuration(pf, false);
14998 dev_info(&pf->pdev->dev,
14999 "couldn't fetch switch config, err %pe aq_err %s\n",
15001 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15004 i40e_pf_reset_stats(pf);
15006 /* set the switch config bit for the whole device to
15007 * support limited promisc or true promisc
15008 * when user requests promisc. The default is limited
15012 if ((pf->hw.pf_id == 0) &&
15013 !test_bit(I40E_FLAG_TRUE_PROMISC_ENA, pf->flags)) {
15014 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
15015 pf->last_sw_conf_flags = flags;
15018 if (pf->hw.pf_id == 0) {
15021 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
15022 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
15024 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
15025 dev_info(&pf->pdev->dev,
15026 "couldn't set switch config bits, err %pe aq_err %s\n",
15028 i40e_aq_str(&pf->hw,
15029 pf->hw.aq.asq_last_status));
15030 /* not a fatal problem, just keep going */
15032 pf->last_sw_conf_valid_flags = valid_flags;
15035 /* first time setup */
15036 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
15037 struct i40e_vsi *vsi = NULL;
15040 /* Set up the PF VSI associated with the PF's main VSI
15041 * that is already in the HW switch
15043 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
15044 uplink_seid = pf->veb[pf->lan_veb]->seid;
15046 uplink_seid = pf->mac_seid;
15047 if (pf->lan_vsi == I40E_NO_VSI)
15048 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
15050 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
15052 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
15053 i40e_cloud_filter_exit(pf);
15054 i40e_fdir_teardown(pf);
15058 /* force a reset of TC and queue layout configurations */
15059 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
15061 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
15062 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
15063 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
15065 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
15067 i40e_fdir_sb_setup(pf);
15069 /* Setup static PF queue filter control settings */
15070 ret = i40e_setup_pf_filter_control(pf);
15072 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
15074 /* Failure here should not prevent continuing with the other steps */
15077 /* enable RSS in the HW, even for only one queue, as the stack can use
15080 if (test_bit(I40E_FLAG_RSS_ENA, pf->flags))
15081 i40e_pf_config_rss(pf);
15083 /* fill in link information and enable LSE reporting */
15084 i40e_link_event(pf);
15088 if (!lock_acquired)
15091 /* repopulate tunnel port filters */
15092 udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
15094 if (!lock_acquired)
15101 * i40e_determine_queue_usage - Work out queue distribution
15102 * @pf: board private structure
15104 static void i40e_determine_queue_usage(struct i40e_pf *pf)
15109 pf->num_lan_qps = 0;
15111 /* Find the max queues to be put into basic use. We'll always be
15112 * using TC0, whether or not DCB is running, and TC0 will get the
15115 queues_left = pf->hw.func_caps.num_tx_qp;
15117 if ((queues_left == 1) ||
15118 !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
15119 /* one qp for PF, no queues for anything else */
15121 pf->alloc_rss_size = pf->num_lan_qps = 1;
15123 /* make sure all the fancies are disabled */
15124 clear_bit(I40E_FLAG_RSS_ENA, pf->flags);
15125 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
15126 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
15127 clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
15128 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
15129 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
15130 clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
15131 clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
15132 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
15133 } else if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags) &&
15134 !test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
15135 !test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
15136 !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) {
15137 /* one qp for PF */
15138 pf->alloc_rss_size = pf->num_lan_qps = 1;
15139 queues_left -= pf->num_lan_qps;
15141 clear_bit(I40E_FLAG_RSS_ENA, pf->flags);
15142 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
15143 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
15144 clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
15145 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
15146 clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
15147 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
15149 /* Not enough queues for all TCs */
15150 if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags) &&
15151 queues_left < I40E_MAX_TRAFFIC_CLASS) {
15152 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
15153 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
15154 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
15157 /* limit lan qps to the smaller of the HW queue and MSI-X vector counts, starting from max(rss_size_max, online CPUs) */
15158 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
15159 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
15160 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
15161 pf->num_lan_qps = q_max;
15163 queues_left -= pf->num_lan_qps;
15166 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
15167 if (queues_left > 1) {
15168 queues_left -= 1; /* save 1 queue for FD */
15170 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
15171 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
15172 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
15176 if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) &&
15177 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
15178 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
15179 (queues_left / pf->num_vf_qps));
15180 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
15183 if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) &&
15184 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
15185 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
15186 (queues_left / pf->num_vmdq_qps));
15187 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
15190 pf->queues_left = queues_left;
15191 dev_dbg(&pf->pdev->dev,
15192 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
15193 pf->hw.func_caps.num_tx_qp,
15194 !!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags),
15195 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
15196 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
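/* Worked example (hypothetical capabilities): with 128 HW queue pairs,
 * rss_size_max = 64, 16 online CPUs and 129 MSI-X vectors, the LAN VSI is
 * given min(max(64, 16), 128, 129) = 64 queue pairs, one queue is reserved
 * for the Flow Director sideband VSI, and the remaining 63 are split among
 * VF and VMDq VSIs according to num_vf_qps and num_vmdq_qps.
 */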
15201 * i40e_setup_pf_filter_control - Setup PF static filter control
15202 * @pf: PF to be setup
15204 * i40e_setup_pf_filter_control sets up a PF's initial filter control
15205 * settings. If PE/FCoE are enabled then it will also set the per-PF
15206 * filter sizes required for them. It also enables Flow Director,
15207 * ethertype and macvlan type filter settings for the PF.
15209 * Returns 0 on success, negative on failure
15211 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
15213 struct i40e_filter_control_settings *settings = &pf->filter_settings;
15215 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
15217 /* Flow Director is enabled */
15218 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ||
15219 test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
15220 settings->enable_fdir = true;
15222 /* Ethtype and MACVLAN filters enabled for PF */
15223 settings->enable_ethtype = true;
15224 settings->enable_macvlan = true;
15226 if (i40e_set_filter_control(&pf->hw, settings))
15232 #define INFO_STRING_LEN 255
15233 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
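/* REMAIN(i) is the space left in buf after i bytes have been written;
 * scnprintf() never writes more than the size it is given and returns the
 * number of bytes actually stored, so the running index can never step past
 * INFO_STRING_LEN (e.g. after 10 bytes, REMAIN(10) == 245).
 */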
15234 static void i40e_print_features(struct i40e_pf *pf)
15236 struct i40e_hw *hw = &pf->hw;
15240 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
15244 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
15245 #ifdef CONFIG_PCI_IOV
15246 i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
15248 i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
15249 pf->hw.func_caps.num_vsis,
15250 pf->vsi[pf->lan_vsi]->num_queue_pairs);
15251 if (test_bit(I40E_FLAG_RSS_ENA, pf->flags))
15252 i += scnprintf(&buf[i], REMAIN(i), " RSS");
15253 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
15254 i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
15255 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
15256 i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
15257 i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
15259 if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
15260 i += scnprintf(&buf[i], REMAIN(i), " DCB");
15261 i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
15262 i += scnprintf(&buf[i], REMAIN(i), " Geneve");
15263 if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
15264 i += scnprintf(&buf[i], REMAIN(i), " PTP");
15265 if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
15266 i += scnprintf(&buf[i], REMAIN(i), " VEB");
15268 i += scnprintf(&buf[i], REMAIN(i), " VEPA");
15270 dev_info(&pf->pdev->dev, "%s\n", buf);
15272 WARN_ON(i > INFO_STRING_LEN);
15276 * i40e_get_platform_mac_addr - get platform-specific MAC address
15277 * @pdev: PCI device information struct
15278 * @pf: board private structure
15280 * Look up the MAC address for the device. First we'll try
15281 * eth_platform_get_mac_address, which will check Open Firmware or an
15282 * arch-specific fallback. Otherwise, we'll default to the value stored in firmware.
15285 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
15287 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
15288 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
15292 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
15293 * @fec_cfg: FEC option to set in flags
15294 * @flags: ptr to flags in which we set FEC option
15296 void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags)
15298 if (fec_cfg & I40E_AQ_SET_FEC_AUTO) {
15299 set_bit(I40E_FLAG_RS_FEC, flags);
15300 set_bit(I40E_FLAG_BASE_R_FEC, flags);
15302 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
15303 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
15304 set_bit(I40E_FLAG_RS_FEC, flags);
15305 clear_bit(I40E_FLAG_BASE_R_FEC, flags);
15307 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
15308 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
15309 set_bit(I40E_FLAG_BASE_R_FEC, flags);
15310 clear_bit(I40E_FLAG_RS_FEC, flags);
15312 if (fec_cfg == 0) {
15313 clear_bit(I40E_FLAG_RS_FEC, flags);
15314 clear_bit(I40E_FLAG_BASE_R_FEC, flags);
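/* Illustrative summary of the mapping above:
 *  - fec_cfg with I40E_AQ_SET_FEC_AUTO set  -> RS and BASE-R both set
 *  - request/ability RS only                -> RS set, BASE-R cleared
 *  - request/ability KR only                -> BASE-R set, RS cleared
 *  - fec_cfg == 0                           -> both cleared
 * The checks are sequential, so when both RS and KR bits are present the
 * KR branch is evaluated last and wins for the conflicting bits.
 */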
15319 * i40e_check_recovery_mode - check if we are running transition firmware
15320 * @pf: board private structure
15322 * Check the registers that indicate whether the firmware runs in recovery mode. Sets the
15323 * appropriate driver state.
15325 * Returns true if the recovery mode was detected, false otherwise
15327 static bool i40e_check_recovery_mode(struct i40e_pf *pf)
15329 u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
15331 if (val & I40E_GL_FWSTS_FWS1B_MASK) {
15332 dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
15333 dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
15334 set_bit(__I40E_RECOVERY_MODE, pf->state);
15338 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
15339 dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");
15345 * i40e_pf_loop_reset - perform reset in a loop.
15346 * @pf: board private structure
15348 * This function is useful when a NIC is about to enter recovery mode.
15349 * When a NIC's internal data structures are corrupted its firmware will
15350 * eventually enter recovery mode.
15351 * Right after a POR it takes about 7 minutes for the firmware to enter
15352 * recovery mode. Until then the NIC is in an intermediate state; after
15353 * that period it almost surely enters recovery mode.
15354 * The only way for the driver to detect the intermediate state is to
15355 * issue a series of PF resets and check the return value.
15356 * If a PF reset returns success the firmware could already be in
15357 * recovery mode, so the caller must check for recovery mode whenever
15358 * this function returns success. There is a small chance that the
15359 * firmware hangs in the intermediate state forever.
15360 * Since waiting the full 7 minutes would take far too long, this
15361 * function waits 10 seconds and then gives up by returning an error.
15363 * Return 0 on success, negative on failure.
15365 static int i40e_pf_loop_reset(struct i40e_pf *pf)
15367 /* wait max 10 seconds for PF reset to succeed */
15368 const unsigned long time_end = jiffies + 10 * HZ;
15369 struct i40e_hw *hw = &pf->hw;
15372 ret = i40e_pf_reset(hw);
15373 while (ret != 0 && time_before(jiffies, time_end)) {
15374 usleep_range(10000, 20000);
15375 ret = i40e_pf_reset(hw);
15381 dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
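/* A minimal caller sketch (hypothetical, not part of the driver): even when
 * the reset loop above returns 0, the firmware may already be in recovery
 * mode, so a caller is expected to re-check before a normal bring-up.
 */
static int __maybe_unused i40e_example_reset_then_check(struct i40e_pf *pf)
{
	int err = i40e_pf_loop_reset(pf);

	if (err)
		return err;

	/* success only means the PF reset completed; recovery mode is still
	 * possible and must be handled separately
	 */
	if (i40e_check_recovery_mode(pf))
		return i40e_init_recovery_mode(pf, &pf->hw);

	return 0;
}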
15387 * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
15388 * @pf: board private structure
15390 * Check FW registers to determine if the FW issued an unexpected EMP Reset.
15391 * Every time an unexpected EMP Reset occurs the FW increments
15392 * a counter of unexpected EMP Resets. When the counter reaches 10
15393 * the FW should enter Recovery mode.
15395 * Returns true if FW issued unexpected EMP Reset
15397 static bool i40e_check_fw_empr(struct i40e_pf *pf)
15399 const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
15400 I40E_GL_FWSTS_FWS1B_MASK;
15401 return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
15402 (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
15406 * i40e_handle_resets - handle EMP resets and PF resets
15407 * @pf: board private structure
15409 * Handle both EMP resets and PF resets and conclude whether there are
15410 * any issues regarding these resets. If there are any issues then
15411 * generate a log entry.
15413 * Return 0 if NIC is healthy or negative value when there are issues
15416 static int i40e_handle_resets(struct i40e_pf *pf)
15418 const int pfr = i40e_pf_loop_reset(pf);
15419 const bool is_empr = i40e_check_fw_empr(pf);
15421 if (is_empr || pfr != 0)
15422 dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
15424 return is_empr ? -EIO : pfr;
15428 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
15429 * @pf: board private structure
15430 * @hw: ptr to the hardware info
15432 * This function does a minimal setup of all subsystems needed for running
15435 * Returns 0 on success, negative on failure
15437 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
15439 struct i40e_vsi *vsi;
15443 pci_set_drvdata(pf->pdev, pf);
15444 pci_save_state(pf->pdev);
15446 /* set up periodic task facility */
15447 timer_setup(&pf->service_timer, i40e_service_timer, 0);
15448 pf->service_timer_period = HZ;
15450 INIT_WORK(&pf->service_task, i40e_service_task);
15451 clear_bit(__I40E_SERVICE_SCHED, pf->state);
15453 err = i40e_init_interrupt_scheme(pf);
15455 goto err_switch_setup;
15457 /* The number of VSIs reported by the FW is the minimum guaranteed
15458 * to us; HW supports far more and we share the remaining pool with
15459 * the other PFs. We allocate space for more than the guarantee with
15460 * the understanding that we might not get them all later.
15462 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15463 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15465 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15467 /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
15468 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15472 goto err_switch_setup;
15475 /* We allocate one VSI which is needed as absolute minimum
15476 * in order to register the netdev
15478 v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
15481 goto err_switch_setup;
15483 pf->lan_vsi = v_idx;
15484 vsi = pf->vsi[v_idx];
15487 goto err_switch_setup;
15489 vsi->alloc_queue_pairs = 1;
15490 err = i40e_config_netdev(vsi);
15492 goto err_switch_setup;
15493 err = register_netdev(vsi->netdev);
15495 goto err_switch_setup;
15496 vsi->netdev_registered = true;
15497 i40e_dbg_pf_init(pf);
15499 err = i40e_setup_misc_vector_for_recovery_mode(pf);
15501 goto err_switch_setup;
15503 /* tell the firmware that we're starting */
15504 i40e_send_version(pf);
15506 /* since everything's happy, start the service_task timer */
15507 mod_timer(&pf->service_timer,
15508 round_jiffies(jiffies + pf->service_timer_period));
15513 i40e_reset_interrupt_capability(pf);
15514 timer_shutdown_sync(&pf->service_timer);
15515 i40e_shutdown_adminq(hw);
15516 iounmap(hw->hw_addr);
15517 pci_release_mem_regions(pf->pdev);
15518 pci_disable_device(pf->pdev);
15525 * i40e_set_subsystem_device_id - set subsystem device id
15526 * @hw: pointer to the hardware info
15528 * Set PCI subsystem device id either from a pci_dev structure or
15529 * a specific FW register.
15531 static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw)
15533 struct i40e_pf *pf = i40e_hw_to_pf(hw);
15535 hw->subsystem_device_id = pf->pdev->subsystem_device ?
15536 pf->pdev->subsystem_device :
15537 (ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX);
15541 * i40e_probe - Device initialization routine
15542 * @pdev: PCI device information struct
15543 * @ent: entry in i40e_pci_tbl
15545 * i40e_probe initializes a PF identified by a pci_dev structure.
15546 * The OS initialization, configuring of the PF private structure,
15547 * and a hardware reset occur.
15549 * Returns 0 on success, negative on failure
15551 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
15553 struct i40e_aq_get_phy_abilities_resp abilities;
15554 #ifdef CONFIG_I40E_DCB
15555 enum i40e_get_fw_lldp_status_resp lldp_status;
15556 #endif /* CONFIG_I40E_DCB */
15557 struct i40e_vsi *vsi;
15558 struct i40e_pf *pf;
15559 struct i40e_hw *hw;
15563 #ifdef CONFIG_I40E_DCB
15565 #endif /* CONFIG_I40E_DCB */
15569 err = pci_enable_device_mem(pdev);
15573 /* set up for high or low dma */
15574 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
15576 dev_err(&pdev->dev,
15577 "DMA configuration failed: 0x%x\n", err);
15581 /* set up pci connections */
15582 err = pci_request_mem_regions(pdev, i40e_driver_name);
15584 dev_info(&pdev->dev,
15585 "pci_request_selected_regions failed %d\n", err);
15589 pci_set_master(pdev);
15591 /* Now that we have a PCI connection, we need to do the
15592 * low level device setup. This is primarily setting up
15593 * the Admin Queue structures and then querying for the
15594 * device's current profile information.
15596 pf = i40e_alloc_pf(&pdev->dev);
15603 set_bit(__I40E_DOWN, pf->state);
15607 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
15608 I40E_MAX_CSR_SPACE);
15609 /* We believe that the highest register to read is
15610 * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size
15611 * is not less than that before mapping to prevent a kernel panic.
15614 if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
15615 dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
15620 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
15621 if (!hw->hw_addr) {
15623 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
15624 (unsigned int)pci_resource_start(pdev, 0),
15625 pf->ioremap_len, err);
15628 hw->vendor_id = pdev->vendor;
15629 hw->device_id = pdev->device;
15630 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
15631 hw->subsystem_vendor_id = pdev->subsystem_vendor;
15632 i40e_set_subsystem_device_id(hw);
15633 hw->bus.device = PCI_SLOT(pdev->devfn);
15634 hw->bus.func = PCI_FUNC(pdev->devfn);
15635 hw->bus.bus_id = pdev->bus->number;
15637 /* Select something other than the 802.1ad ethertype for the
15638 * switch to use internally and drop on ingress.
15640 hw->switch_tag = 0xffff;
15641 hw->first_tag = ETH_P_8021AD;
15642 hw->second_tag = ETH_P_8021Q;
15644 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
15645 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
15646 INIT_LIST_HEAD(&pf->ddp_old_prof);
15648 /* set up the locks for the AQ, do this only once in probe
15649 * and destroy them only once in remove
15651 mutex_init(&hw->aq.asq_mutex);
15652 mutex_init(&hw->aq.arq_mutex);
15654 pf->msg_enable = netif_msg_init(debug,
15659 pf->hw.debug_mask = debug;
15661 /* do a special CORER for clearing PXE mode once at init */
15662 if (hw->revision_id == 0 &&
15663 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
15664 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
15669 i40e_clear_pxe_mode(hw);
15672 /* Reset here to make sure all is clean and to define PF 'n' */
15675 err = i40e_set_mac_type(hw);
15677 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15682 err = i40e_handle_resets(pf);
15686 i40e_check_recovery_mode(pf);
15688 if (is_kdump_kernel()) {
15689 hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN;
15690 hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN;
15692 hw->aq.num_arq_entries = I40E_AQ_LEN;
15693 hw->aq.num_asq_entries = I40E_AQ_LEN;
15695 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15696 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15698 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
15700 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
15702 err = i40e_init_shared_code(hw);
15704 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15709 /* set up a default setting for link flow control */
15710 pf->hw.fc.requested_mode = I40E_FC_NONE;
15712 err = i40e_init_adminq(hw);
15715 dev_info(&pdev->dev,
15716 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
15717 hw->aq.api_maj_ver,
15718 hw->aq.api_min_ver,
15719 I40E_FW_API_VERSION_MAJOR,
15720 I40E_FW_MINOR_VERSION(hw));
15722 dev_info(&pdev->dev,
15723 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
15727 i40e_get_oem_version(hw);
15728 i40e_get_pba_string(hw);
15730 /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
15731 i40e_nvm_version_str(hw, nvm_ver, sizeof(nvm_ver));
15732 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
15733 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
15734 hw->aq.api_maj_ver, hw->aq.api_min_ver, nvm_ver,
15735 hw->vendor_id, hw->device_id, hw->subsystem_vendor_id,
15736 hw->subsystem_device_id);
15738 if (i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR,
15739 I40E_FW_MINOR_VERSION(hw) + 1))
15740 dev_dbg(&pdev->dev,
15741 "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
15742 hw->aq.api_maj_ver,
15743 hw->aq.api_min_ver,
15744 I40E_FW_API_VERSION_MAJOR,
15745 I40E_FW_MINOR_VERSION(hw));
15746 else if (i40e_is_aq_api_ver_lt(hw, 1, 4))
15747 dev_info(&pdev->dev,
15748 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
15749 hw->aq.api_maj_ver,
15750 hw->aq.api_min_ver,
15751 I40E_FW_API_VERSION_MAJOR,
15752 I40E_FW_MINOR_VERSION(hw));
15754 i40e_verify_eeprom(pf);
15756 /* Rev 0 hardware was never productized */
15757 if (hw->revision_id < 1)
15758 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
15760 i40e_clear_pxe_mode(hw);
15762 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
15764 goto err_adminq_setup;
15766 err = i40e_sw_init(pf);
15768 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
15772 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
15773 return i40e_init_recovery_mode(pf, hw);
15775 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
15776 hw->func_caps.num_rx_qp, 0, 0);
15778 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
15779 goto err_init_lan_hmc;
15782 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
15784 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
15786 goto err_configure_lan_hmc;
15789 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
15790 * Ignore error return codes because if it was already disabled via
15791 * hardware settings this will fail
15793 if (test_bit(I40E_HW_CAP_STOP_FW_LLDP, pf->hw.caps)) {
15794 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
15795 i40e_aq_stop_lldp(hw, true, false, NULL);
15798 /* allow a platform config to override the HW addr */
15799 i40e_get_platform_mac_addr(pdev, pf);
15801 if (!is_valid_ether_addr(hw->mac.addr)) {
15802 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
15806 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
15807 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
15808 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
15809 if (is_valid_ether_addr(hw->mac.port_addr))
15810 set_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps);
15812 i40e_ptp_alloc_pins(pf);
15813 pci_set_drvdata(pdev, pf);
15814 pci_save_state(pdev);
15816 #ifdef CONFIG_I40E_DCB
15817 status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
15819 lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED) ?
15820 (clear_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) :
15821 (set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags));
15822 dev_info(&pdev->dev,
15823 test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ?
15824 "FW LLDP is disabled\n" :
15825 "FW LLDP is enabled\n");
15827 /* Enable FW to write default DCB config on link-up */
15828 i40e_aq_set_dcb_parameters(hw, true, NULL);
15830 err = i40e_init_pf_dcb(pf);
15832 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
15833 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
15834 clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
15835 /* Continue without DCB enabled */
15837 #endif /* CONFIG_I40E_DCB */
15839 /* set up periodic task facility */
15840 timer_setup(&pf->service_timer, i40e_service_timer, 0);
15841 pf->service_timer_period = HZ;
15843 INIT_WORK(&pf->service_task, i40e_service_task);
15844 clear_bit(__I40E_SERVICE_SCHED, pf->state);
15846 /* NVM bit on means WoL disabled for the port */
15847 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
15848 if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
15849 pf->wol_en = false;
15852 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
15854 /* set up the main switch operations */
15855 i40e_determine_queue_usage(pf);
15856 err = i40e_init_interrupt_scheme(pf);
15858 goto err_switch_setup;
15860 /* Reduce Tx and Rx pairs for kdump.
15861 * When MSI-X is enabled, we may not use more TC queue
15862 * pairs than there are MSI-X vectors (pf->num_lan_msix). Thus
15863 * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1.
15865 if (is_kdump_kernel())
15866 pf->num_lan_msix = 1;
15868 pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
15869 pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
15870 pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
15871 pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
15872 pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
15873 pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
15874 UDP_TUNNEL_TYPE_GENEVE;
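/* a single table accepts both VxLAN and Geneve ports for this device */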
15876 /* The number of VSIs reported by the FW is the minimum guaranteed
15877 * to us; HW supports far more and we share the remaining pool with
15878 * the other PFs. We allocate space for more than the guarantee with
15879 * the understanding that we might not get them all later.
15881 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15882 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15884 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15885 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
15886 dev_warn(&pf->pdev->dev,
15887 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
15888 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
15889 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
15892 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
15893 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15897 goto err_switch_setup;
15900 #ifdef CONFIG_PCI_IOV
15901 /* prep for VF support */
15902 if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) &&
15903 test_bit(I40E_FLAG_MSIX_ENA, pf->flags) &&
15904 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15905 if (pci_num_vf(pdev))
15906 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
15909 err = i40e_setup_pf_switch(pf, false, false);
15911 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
15914 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
15916 /* if FDIR VSI was set up, start it now */
15917 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
15919 i40e_vsi_open(vsi);
15921 /* The driver only wants link up/down and module qualification
15922 * reports from firmware. Note the negative logic.
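 * (bits set in the mask suppress the corresponding events, so the
 * complement below leaves only these three event types enabled)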
15924 err = i40e_aq_set_phy_int_mask(&pf->hw,
15925 ~(I40E_AQ_EVENT_LINK_UPDOWN |
15926 I40E_AQ_EVENT_MEDIA_NA |
15927 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
15929 dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
15931 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15933 /* Reconfigure the hardware to allow a smaller MSS in the case
15934 * of TSO, so that we avoid the MDD being fired and causing
15935 * a reset in the case of small MSS+TSO.
15937 val = rd32(hw, I40E_REG_MSS);
15938 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
15939 val &= ~I40E_REG_MSS_MIN_MASK;
15940 val |= I40E_64BYTE_MSS;
15941 wr32(hw, I40E_REG_MSS, val);
15944 if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) {
15946 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
15948 dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
15950 i40e_aq_str(&pf->hw,
15951 pf->hw.aq.asq_last_status));
15953 /* The main driver is (mostly) up and happy. We need to set this state
15954 * before setting up the misc vector or we get a race and the vector
15955 * ends up disabled forever.
15957 clear_bit(__I40E_DOWN, pf->state);
15959 /* In case of MSIX we are going to setup the misc vector right here
15960 * to handle admin queue events etc. In case of legacy and MSI
15961 * the misc functionality and queue processing is combined in
15962 * the same vector and that gets setup at open.
15964 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
15965 err = i40e_setup_misc_vector(pf);
15967 dev_info(&pdev->dev,
15968 "setup of misc vector failed: %d\n", err);
15969 i40e_cloud_filter_exit(pf);
15970 i40e_fdir_teardown(pf);
15975 #ifdef CONFIG_PCI_IOV
15976 /* prep for VF support */
15977 if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) &&
15978 test_bit(I40E_FLAG_MSIX_ENA, pf->flags) &&
15979 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15980 /* disable link interrupts for VFs */
15981 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
15982 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
15983 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
15986 if (pci_num_vf(pdev)) {
15987 dev_info(&pdev->dev,
15988 "Active VFs found, allocating resources.\n");
15989 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
15991 dev_info(&pdev->dev,
15992 "Error %d allocating resources for existing VFs\n",
15996 #endif /* CONFIG_PCI_IOV */
15998 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
15999 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
16000 pf->num_iwarp_msix,
16001 I40E_IWARP_IRQ_PILE_ID);
16002 if (pf->iwarp_base_vector < 0) {
16003 dev_info(&pdev->dev,
16004 "failed to get tracking for %d vectors for IWARP err=%d\n",
16005 pf->num_iwarp_msix, pf->iwarp_base_vector);
16006 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
16010 i40e_dbg_pf_init(pf);
16012 /* tell the firmware that we're starting */
16013 i40e_send_version(pf);
16015 /* since everything's happy, start the service_task timer */
16016 mod_timer(&pf->service_timer,
16017 round_jiffies(jiffies + pf->service_timer_period));
16019 /* add this PF to client device list and launch a client service task */
16020 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
16021 err = i40e_lan_add_device(pf);
16023 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
16027 #define PCI_SPEED_SIZE 8
16028 #define PCI_WIDTH_SIZE 8
16029 /* Devices on the IOSF bus do not have this information
16030 * and will report PCI Gen 1 x 1 by default so don't bother
16033 if (!test_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, pf->hw.caps)) {
16034 char speed[PCI_SPEED_SIZE] = "Unknown";
16035 char width[PCI_WIDTH_SIZE] = "Unknown";
16037 /* Get the negotiated link width and speed from PCI config
16040 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
16043 i40e_set_pci_config_data(hw, link_status);
16045 switch (hw->bus.speed) {
16046 case i40e_bus_speed_8000:
16047 strscpy(speed, "8.0", PCI_SPEED_SIZE); break;
16048 case i40e_bus_speed_5000:
16049 strscpy(speed, "5.0", PCI_SPEED_SIZE); break;
16050 case i40e_bus_speed_2500:
16051 strscpy(speed, "2.5", PCI_SPEED_SIZE); break;
16055 switch (hw->bus.width) {
16056 case i40e_bus_width_pcie_x8:
16057 strscpy(width, "8", PCI_WIDTH_SIZE); break;
16058 case i40e_bus_width_pcie_x4:
16059 strscpy(width, "4", PCI_WIDTH_SIZE); break;
16060 case i40e_bus_width_pcie_x2:
16061 strscpy(width, "2", PCI_WIDTH_SIZE); break;
16062 case i40e_bus_width_pcie_x1:
16063 strscpy(width, "1", PCI_WIDTH_SIZE); break;
16068 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
16071 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
16072 hw->bus.speed < i40e_bus_speed_8000) {
16073 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
16074 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
16078 /* get the requested speeds from the fw */
16079 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
16081 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n",
16083 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
16084 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
16086 /* set the FEC config due to the board capabilities */
16087 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, pf->flags);
16089 /* get the supported phy types from the fw */
16090 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
16092 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n",
16094 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
16096 /* make sure the MFS hasn't been set lower than the default */
16097 #define MAX_FRAME_SIZE_DEFAULT 0x2600
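/* 0x2600 is 9728 bytes, the expected default port MFS (max frame size) */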
16098 val = FIELD_GET(I40E_PRTGL_SAH_MFS_MASK,
16099 rd32(&pf->hw, I40E_PRTGL_SAH));
16100 if (val < MAX_FRAME_SIZE_DEFAULT)
16101 dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
16104 /* Add a filter to prevent Flow control frames from any VSI from being
16105 * transmitted. By doing so we stop a malicious VF from sending out
16106 * PAUSE or PFC frames and potentially controlling traffic for other
16108 * The FW can still send Flow control frames if enabled.
16110 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
16111 pf->main_vsi_seid);
16113 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
16114 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
16115 set_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps);
16116 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
16117 set_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps);
16118 /* print a string summarizing features */
16119 i40e_print_features(pf);
16121 i40e_devlink_register(pf);
16125 /* Unwind what we've done if something failed in the setup */
16127 set_bit(__I40E_DOWN, pf->state);
16128 i40e_clear_interrupt_scheme(pf);
16131 i40e_reset_interrupt_capability(pf);
16132 timer_shutdown_sync(&pf->service_timer);
16134 err_configure_lan_hmc:
16135 (void)i40e_shutdown_lan_hmc(hw);
16137 kfree(pf->qp_pile);
16141 iounmap(hw->hw_addr);
16145 pci_release_mem_regions(pdev);
16148 pci_disable_device(pdev);
16153 * i40e_remove - Device removal routine
16154 * @pdev: PCI device information struct
16156 * i40e_remove is called by the PCI subsystem to alert the driver
16157 * that it should release a PCI device. This could be caused by a
16158 * Hot-Plug event, or because the driver is going to be removed from
16161 static void i40e_remove(struct pci_dev *pdev)
16163 struct i40e_pf *pf = pci_get_drvdata(pdev);
16164 struct i40e_hw *hw = &pf->hw;
16165 struct i40e_vsi *vsi;
16166 struct i40e_veb *veb;
16170 i40e_devlink_unregister(pf);
16172 i40e_dbg_pf_exit(pf);
16176 /* Disable RSS in hw */
16177 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
16178 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
16180 /* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
16181 * flags; once they are set, i40e_rebuild should not be called as
16182 * i40e_prep_for_reset always returns early.
16184 while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
16185 usleep_range(1000, 2000);
16186 set_bit(__I40E_IN_REMOVE, pf->state);
16188 if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags)) {
16189 set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
16191 clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
16193 /* no more scheduling of any task */
16194 set_bit(__I40E_SUSPENDED, pf->state);
16195 set_bit(__I40E_DOWN, pf->state);
16196 if (pf->service_timer.function)
16197 timer_shutdown_sync(&pf->service_timer);
16198 if (pf->service_task.func)
16199 cancel_work_sync(&pf->service_task);
16201 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
16202 struct i40e_vsi *vsi = pf->vsi[0];
16204 /* We know that we have allocated only one VSI for this PF;
16205 * it was created just for registering the netdevice, so that the
16206 * interface could be visible in the 'ifconfig' output
16208 unregister_netdev(vsi->netdev);
16209 free_netdev(vsi->netdev);
16214 /* Client close must be called explicitly here because the timer
16215 * has been stopped.
16217 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16219 i40e_fdir_teardown(pf);
16221 /* If there is a switch structure or any orphans, remove them.
16222 * This will leave only the PF's VSI remaining.
16224 i40e_pf_for_each_veb(pf, i, veb)
16225 if (veb->uplink_seid == pf->mac_seid ||
16226 veb->uplink_seid == 0)
16227 i40e_switch_branch_release(veb);
16229 /* Now we can shutdown the PF's VSIs, just before we kill
16232 i40e_pf_for_each_vsi(pf, i, vsi) {
16233 i40e_vsi_close(vsi);
16234 i40e_vsi_release(vsi);
16238 i40e_cloud_filter_exit(pf);
16240 /* remove attached clients */
16241 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
16242 ret_code = i40e_lan_del_device(pf);
16244 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
16248 /* shutdown and destroy the HMC */
16249 if (hw->hmc.hmc_obj) {
16250 ret_code = i40e_shutdown_lan_hmc(hw);
16252 dev_warn(&pdev->dev,
16253 "Failed to destroy the HMC resources: %d\n",
16258 /* Free MSI/legacy interrupt 0 when in recovery mode. */
16259 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
16260 !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
16261 free_irq(pf->pdev->irq, pf);
16263 /* shutdown the adminq */
16264 i40e_shutdown_adminq(hw);
16266 /* destroy the locks only once, here */
16267 mutex_destroy(&hw->aq.arq_mutex);
16268 mutex_destroy(&hw->aq.asq_mutex);
16270 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
16272 i40e_clear_interrupt_scheme(pf);
16273 i40e_pf_for_each_vsi(pf, i, vsi) {
16274 if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
16275 i40e_vsi_clear_rings(vsi);
16277 i40e_vsi_clear(vsi);
16282 i40e_pf_for_each_veb(pf, i, veb) {
16287 kfree(pf->qp_pile);
16290 iounmap(hw->hw_addr);
16292 pci_release_mem_regions(pdev);
16294 pci_disable_device(pdev);
16298 * i40e_pci_error_detected - warning that something funky happened in PCI land
16299 * @pdev: PCI device information struct
16300 * @error: the type of PCI error
16302 * Called to warn that something happened and the error handling steps
16303 * are in progress. Allows the driver to quiesce things, be ready for
16306 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
16307 pci_channel_state_t error)
16309 struct i40e_pf *pf = pci_get_drvdata(pdev);
16311 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
16314 dev_info(&pdev->dev,
16315 "Cannot recover - error happened during device probe\n");
16316 return PCI_ERS_RESULT_DISCONNECT;
16319 /* shutdown all operations */
16320 if (!test_bit(__I40E_SUSPENDED, pf->state))
16321 i40e_prep_for_reset(pf);
16323 /* Request a slot reset */
16324 return PCI_ERS_RESULT_NEED_RESET;
16328 * i40e_pci_error_slot_reset - a PCI slot reset just happened
16329 * @pdev: PCI device information struct
16331 * Called to find if the driver can work with the device now that
16332 * the pci slot has been reset. If a basic connection seems good
16333 * (registers are readable and have sane content) then return a
16334 * happy little PCI_ERS_RESULT_xxx.
16336 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
16338 struct i40e_pf *pf = pci_get_drvdata(pdev);
16339 pci_ers_result_t result;
16342 dev_dbg(&pdev->dev, "%s\n", __func__);
16343 if (pci_enable_device_mem(pdev)) {
16344 dev_info(&pdev->dev,
16345 "Cannot re-enable PCI device after reset.\n");
16346 result = PCI_ERS_RESULT_DISCONNECT;
16348 pci_set_master(pdev);
16349 pci_restore_state(pdev);
16350 pci_save_state(pdev);
16351 pci_wake_from_d3(pdev, false);
16353 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
16355 result = PCI_ERS_RESULT_RECOVERED;
16357 result = PCI_ERS_RESULT_DISCONNECT;
16364 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
16365 * @pdev: PCI device information struct
16367 static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
16369 struct i40e_pf *pf = pci_get_drvdata(pdev);
16371 i40e_prep_for_reset(pf);
16375 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
16376 * @pdev: PCI device information struct
16378 static void i40e_pci_error_reset_done(struct pci_dev *pdev)
16380 struct i40e_pf *pf = pci_get_drvdata(pdev);
16382 if (test_bit(__I40E_IN_REMOVE, pf->state))
16385 i40e_reset_and_rebuild(pf, false, false);
16386 #ifdef CONFIG_PCI_IOV
16387 i40e_restore_all_vfs_msi_state(pdev);
16388 #endif /* CONFIG_PCI_IOV */
16392 * i40e_pci_error_resume - restart operations after PCI error recovery
16393 * @pdev: PCI device information struct
16395 * Called to allow the driver to bring things back up after PCI error
16396 * and/or reset recovery has finished.
16398 static void i40e_pci_error_resume(struct pci_dev *pdev)
16400 struct i40e_pf *pf = pci_get_drvdata(pdev);
16402 dev_dbg(&pdev->dev, "%s\n", __func__);
16403 if (test_bit(__I40E_SUSPENDED, pf->state))
16406 i40e_handle_reset_warning(pf, false);
16410 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
16411 * using the mac_address_write admin q function
16412 * @pf: pointer to i40e_pf struct
16414 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
16416 struct i40e_hw *hw = &pf->hw;
16421 /* Get current MAC address in case it's an LAA */
16422 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
16423 ether_addr_copy(mac_addr,
16424 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
16426 dev_err(&pf->pdev->dev,
16427 "Failed to retrieve MAC address; using default\n");
16428 ether_addr_copy(mac_addr, hw->mac.addr);
16431 /* The FW expects the mac address write cmd to first be called with
16432 * one of these flags before calling it again with the multicast
16435 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
16437 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
16438 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
16440 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
16442 dev_err(&pf->pdev->dev,
16443 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
16447 flags = I40E_AQC_MC_MAG_EN
16448 | I40E_AQC_WOL_PRESERVE_ON_PFR
16449 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
16450 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
16452 dev_err(&pf->pdev->dev,
16453 "Failed to enable Multicast Magic Packet wake up\n");
16457 * i40e_shutdown - PCI callback for shutting down
16458 * @pdev: PCI device information struct
16460 static void i40e_shutdown(struct pci_dev *pdev)
16462 struct i40e_pf *pf = pci_get_drvdata(pdev);
16463 struct i40e_hw *hw = &pf->hw;
16465 set_bit(__I40E_SUSPENDED, pf->state);
16466 set_bit(__I40E_DOWN, pf->state);
16468 del_timer_sync(&pf->service_timer);
16469 cancel_work_sync(&pf->service_task);
16470 i40e_cloud_filter_exit(pf);
16471 i40e_fdir_teardown(pf);
16473 /* Client close must be called explicitly here because the timer
16474 * has been stopped.
16476 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16478 if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
16480 i40e_enable_mc_magic_wake(pf);
16482 i40e_prep_for_reset(pf);
16484 wr32(hw, I40E_PFPM_APM,
16485 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
16486 wr32(hw, I40E_PFPM_WUFC,
16487 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
16489 /* Free MSI/legacy interrupt 0 when in recovery mode. */
16490 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
16491 !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
16492 free_irq(pf->pdev->irq, pf);
16494 /* Since we're going to destroy queues during the
16495 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
16499 i40e_clear_interrupt_scheme(pf);
16502 if (system_state == SYSTEM_POWER_OFF) {
16503 pci_wake_from_d3(pdev, pf->wol_en);
16504 pci_set_power_state(pdev, PCI_D3hot);
16509 * i40e_suspend - PM callback for moving to D3
16510 * @dev: generic device information structure
16512 static int __maybe_unused i40e_suspend(struct device *dev)
16514 struct i40e_pf *pf = dev_get_drvdata(dev);
16515 struct i40e_hw *hw = &pf->hw;
16517 /* If we're already suspended, then there is nothing to do */
16518 if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
16521 set_bit(__I40E_DOWN, pf->state);
16523 /* Ensure service task will not be running */
16524 del_timer_sync(&pf->service_timer);
16525 cancel_work_sync(&pf->service_task);
16527 /* Client close must be called explicitly here because the timer
16528 * has been stopped.
16530 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
16532 if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
16534 i40e_enable_mc_magic_wake(pf);
16536 /* Since we're going to destroy queues during the
16537 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
16542 i40e_prep_for_reset(pf);
16544 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
16545 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
16547 /* Clear the interrupt scheme and release our IRQs so that the system
16548 * can safely hibernate even when there are a large number of CPUs.
16549 * Otherwise hibernation might fail when mapping all the vectors back
16552 i40e_clear_interrupt_scheme(pf);
16560 * i40e_resume - PM callback for waking up from D3
16561 * @dev: generic device information structure
16563 static int __maybe_unused i40e_resume(struct device *dev)
16565 struct i40e_pf *pf = dev_get_drvdata(dev);
16568 /* If we're not suspended, then there is nothing to do */
16569 if (!test_bit(__I40E_SUSPENDED, pf->state))
16572 /* We need to hold the RTNL lock prior to restoring interrupt schemes,
16573 * since we're going to be restoring queues
16577 /* We cleared the interrupt scheme when we suspended, so we need to
16578 * restore it now to resume device functionality.
16580 err = i40e_restore_interrupt_scheme(pf);
16582 dev_err(dev, "Cannot restore interrupt scheme: %d\n",
16586 clear_bit(__I40E_DOWN, pf->state);
16587 i40e_reset_and_rebuild(pf, false, true);
16591 /* Clear suspended state last after everything is recovered */
16592 clear_bit(__I40E_SUSPENDED, pf->state);
16594 /* Restart the service task */
16595 mod_timer(&pf->service_timer,
16596 round_jiffies(jiffies + pf->service_timer_period));
16601 static const struct pci_error_handlers i40e_err_handler = {
16602 .error_detected = i40e_pci_error_detected,
16603 .slot_reset = i40e_pci_error_slot_reset,
16604 .reset_prepare = i40e_pci_error_reset_prepare,
16605 .reset_done = i40e_pci_error_reset_done,
16606 .resume = i40e_pci_error_resume,
16609 static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
16611 static struct pci_driver i40e_driver = {
16612 .name = i40e_driver_name,
16613 .id_table = i40e_pci_tbl,
16614 .probe = i40e_probe,
16615 .remove = i40e_remove,
16617 .pm = &i40e_pm_ops,
16619 .shutdown = i40e_shutdown,
16620 .err_handler = &i40e_err_handler,
16621 .sriov_configure = i40e_pci_sriov_configure,
16625 * i40e_init_module - Driver registration routine
16627 * i40e_init_module is the first routine called when the driver is
16628 * loaded. All it does is register with the PCI subsystem.
16630 static int __init i40e_init_module(void)
16634 pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
16635 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
16637 /* There is no need to throttle the number of active tasks because
16638 * each device limits its own task using a state bit for scheduling
16639 * the service task, and the device tasks do not interfere with each
16640 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
16641 * since we need to be able to guarantee forward progress even under
16644 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
16646 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
16651 err = pci_register_driver(&i40e_driver);
16653 destroy_workqueue(i40e_wq);
16660 module_init(i40e_init_module);
16663 * i40e_exit_module - Driver exit cleanup routine
16665 * i40e_exit_module is called just before the driver is removed
16668 static void __exit i40e_exit_module(void)
16670 pci_unregister_driver(&i40e_driver);
16671 destroy_workqueue(i40e_wq);
16672 ida_destroy(&i40e_client_ida);
16675 module_exit(i40e_exit_module);