// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include "i40e_xsk.h"
#include <net/udp_tunnel.h>
#include <net/xdp_sock.h>
/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 8
#define DRV_VERSION_BUILD 20
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
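/* With the values above, DRV_VERSION expands to the string "2.8.20-k". */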
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
static bool i40e_check_recovery_mode(struct i40e_pf *pf);
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
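
/* Illustrative invocation (not exhaustive): loading the module with
 * "modprobe i40e debug=0x80000010" supplies a value with the high bit set,
 * which the driver treats as a hardware debug mask rather than a message
 * level; see the handling of the "debug" parameter in i40e_probe().
 */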

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%s needed=%d id=0x%04x\n",
			 pile ? "<valid>" : "<null>", needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
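
/* A sketch of how the two helpers above pair up, modeled on how this file
 * carves queue pairs out of pf->qp_pile ("n" and "owner_idx" are
 * illustrative names, not driver fields):
 *
 *	int base = i40e_get_lump(pf, pf->qp_pile, n, owner_idx);
 *
 *	if (base < 0)
 *		// allocation failed; no entries were marked
 *	else
 *		// entries base..base+n-1 now carry owner_idx
 *
 *	i40e_put_lump(pf->qp_pile, base, owner_idx);
 */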

/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if ((!test_bit(__I40E_DOWN, pf->state) &&
	     !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
	      test_bit(__I40E_RECOVERY_MODE, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number timing out
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i;
	u32 head, val;

	pf->tx_timeout_count++;

	/* with txqueue index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (txqueue ==
			    vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	/* don't kick off another recovery if one is already pending */
	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
		return;

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
		    pf->tx_timeout_recovery_level, txqueue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes   = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes   += bytes;
}
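
/* Note on the loop above: u64_stats_fetch_begin_irq()/..._retry_irq() is the
 * standard seqcount pattern for reading 64-bit counters that a writer may be
 * updating concurrently; the read is simply retried until it observes a
 * consistent snapshot, so no lock is taken on this path.
 */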

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	struct i40e_ring *ring;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		ring = READ_ONCE(vsi->tx_rings[i]);
		if (!ring)
			continue;
		i40e_get_netdev_stats_struct_tx(ring, stats);

		if (i40e_enabled_xdp_vsi(vsi)) {
			ring = READ_ONCE(vsi->xdp_rings[i]);
			if (!ring)
				continue;
			i40e_get_netdev_stats_struct_tx(ring, stats);
		}

		ring = READ_ONCE(vsi->rx_rings[i]);
		if (!ring)
			continue;
		do {
			start   = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes   = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			memset(&pf->veb[i]->tc_stats, 0,
			       sizeof(pf->veb[i]->tc_stats));
			memset(&pf->veb[i]->tc_stats_offsets, 0,
			       sizeof(pf->veb[i]->tc_stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
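
/* Worked example of the roll-over branch above: with a 48 bit counter, if
 * *offset = 0xFFFFFFFFFFF0 and the hardware counter has wrapped around to
 * new_data = 0x10, then (0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0 = 0x20, i.e.
 * 32 events occurred across the wrap.
 */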

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}

/**
 * i40e_count_filters - counts VSI mac filters
 * @vsi: the VSI to be searched
 *
 * Returns count of mac filters
 **/
int i40e_count_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;
	int cnt = 0;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		++cnt;

	return cnt;
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ALL. We could check the table each
	 * time, incurring search cost repeatedly. However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filters, which we
	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
	 * we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search
	 * filters here and can perform the check at the same time. This
	 * results in avoiding embedding a loop for VLAN mode inside another
	 * loop over all the filters, and should maintain correctness as noted
	 * above.
	 */
	return vsi->has_vlan_filter;
}
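
/* Illustration of the invariant above: a VSI whose only MAC filters use
 * VLAN = I40E_VLAN_ANY (-1) is not in VLAN mode; the first i40e_add_filter()
 * call with vlan >= 0 flips has_vlan_filter, and the sync task recomputes
 * it after filters are deleted.
 */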

/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 */
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
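
/* Worked example of the rules above (no PVID assigned): if the VSI holds a
 * MAC filter with VLAN = I40E_VLAN_ANY and a VLAN 5 filter is then added,
 * vlan_filters becomes nonzero, so the VLAN = -1 filter is queued on
 * tmp_del_list and re-added as VLAN = 0. Untagged frames still match, while
 * tagged frames must now match the VLAN 5 filter explicitly.
 */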

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place.
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}

/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}

/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}

/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}

/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	lockdep_assert_held(&vsi->mac_filter_hash_lock);
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* Copy the address first, so that we avoid a possible race with
	 * .set_rx_mode().
	 * - Remove old address from MAC filter
	 * - Copy new address
	 * - Add new address to MAC filter
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	i40e_add_mac_filter(vsi, netdev->dev_addr);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(pf);
	return 0;
}

/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table of lut_size
 * @lut_size: size of the lookup table
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}

/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
	return ret;
}

/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured,
 * @ctxt: VSI context structure
 * @enabled_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
					   struct i40e_vsi_context *ctxt,
					   u8 enabled_tc)
{
	u16 qcount = 0, max_qcount, qmap, sections = 0;
	int i, override_q, pow, num_qps, ret;
	u8 netdev_tc = 0, offset = 0;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	num_qps = vsi->mqprio_qopt.qopt.count[0];

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(num_qps);
	if (!is_power_of_2(num_qps))
		pow++;
	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
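
	/* e.g. num_qps = 6: ilog2(6) = 2 and 6 is not a power of two, so
	 * pow becomes 3 and the qmap above advertises 2^3 = 8 queues for
	 * TC0 of this VSI.
	 */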

	/* Setup queue offset/count for all TCs for given VSI */
	max_qcount = vsi->mqprio_qopt.qopt.count[0];
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount = vsi->mqprio_qopt.qopt.count[i];
			if (qcount > max_qcount)
				max_qcount = qcount;
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;
		}
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset + qcount;

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);

	/* Reconfigure RSS for main VSI with max queue count */
	vsi->rss_size = max_qcount;
	ret = i40e_vsi_config_rss(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to reconfig rss for num_queues (%u)\n",
			 max_qcount);
		return ret;
	}
	vsi->reconfig_rss = true;
	dev_dbg(&vsi->back->pdev->dev,
		"Reconfigured rss with num_queues (%u)\n", max_qcount);

	/* Find queue count available for channel VSIs and starting offset
	 * for channel VSIs
	 */
	override_q = vsi->mqprio_qopt.qopt.count[0];
	if (override_q && override_q < vsi->num_queue_pairs) {
		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
		vsi->next_base_queue = override_q;
	}
	return 0;
}
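
/* Usage sketch: the mapping above takes effect when the main VSI is
 * configured through mqprio in channel mode, e.g. (illustrative interface
 * name and queue counts, not a mandated configuration):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel
 */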

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 1;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	/* Number of queues per enabled TC */
	num_tc_qps = vsi->alloc_queue_pairs;
	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
		num_tc_qps = num_tc_qps / numtc;
		num_tc_qps = min_t(int, num_tc_qps,
				   i40e_pf_get_max_q_per_tc(pf));
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

	/* Do not allow use of more TC queue pairs than MSI-X vectors exist */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
				    I40E_FLAG_FD_ATR_ENABLED)) ||
				    vsi->tc_config.enabled_tc != 1) {
					qcount = min_t(int, pf->alloc_rss_size,
						       num_tc_qps);
					break;
				}
				/* fall through */
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}

/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (i40e_add_mac_filter(vsi, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	i40e_del_mac_filter(vsi, addr);

	return 0;
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
	__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}
}
1958 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1959 * @vsi: Pointer to VSI struct
1960 * @from: Pointer to list which contains MAC filter entries - changes to
1961 * those entries needs to be undone.
1963 * MAC filter entries from this list were slated for deletion.
1965 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1966 struct hlist_head *from)
1968 struct i40e_mac_filter *f;
1969 struct hlist_node *h;
1971 hlist_for_each_entry_safe(f, h, from, hlist) {
1972 u64 key = i40e_addr_to_hkey(f->macaddr);
1974 /* Move the element back into the MAC filter list */
1975 hlist_del(&f->hlist);
1976 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1981 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
1982 * @vsi: Pointer to vsi struct
1983 * @from: Pointer to list which contains MAC filter entries - changes to
1984 * those entries needs to be undone.
1986 * MAC filter entries from this list were slated for addition.
1988 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
1989 struct hlist_head *from)
1991 struct i40e_new_mac_filter *new;
1992 struct hlist_node *h;
1994 hlist_for_each_entry_safe(new, h, from, hlist) {
1995 /* We can simply free the wrapper structure */
1996 hlist_del(&new->hlist);
2002 * i40e_next_filter - Get the next non-broadcast filter from a list
2003 * @next: pointer to filter in list
2005 * Returns the next non-broadcast filter in the list. Required so that we
2006 * ignore broadcast filters within the list, since these are not handled via
2007 * the normal firmware update path.
2010 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2012 hlist_for_each_entry_continue(next, hlist) {
2013 if (!is_broadcast_ether_addr(next->f->macaddr))
2021 * i40e_update_filter_state - Update filter state based on return data
2023 * @count: Number of filters added
2024 * @add_list: return data from fw
2025 * @add_head: pointer to first filter in current batch
2027 * MAC filter entries from the list were slated to be added to the device. Returns
2028 * number of successful filters. Note that 0 does NOT mean success!
2031 i40e_update_filter_state(int count,
2032 struct i40e_aqc_add_macvlan_element_data *add_list,
2033 struct i40e_new_mac_filter *add_head)
2038 for (i = 0; i < count; i++) {
2039 /* Always check status of each filter. We don't need to check
2040 * the firmware return status because we pre-set the filter
2041 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2042 * request to the adminq. Thus, if it no longer matches then
2043 * we know the filter is active.
2045 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2046 add_head->state = I40E_FILTER_FAILED;
2048 add_head->state = I40E_FILTER_ACTIVE;
2052 add_head = i40e_next_filter(add_head);
2061 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2062 * @vsi: ptr to the VSI
2063 * @vsi_name: name to display in messages
2064 * @list: the list of filters to send to firmware
2065 * @num_del: the number of filters to delete
2066 * @retval: Set to -EIO on failure to delete
2068 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2069 * *retval instead of a return value so that success does not force ret_val to
2070 * be set to 0. This ensures that a sequence of calls to this function
2071 * preserve the previous value of *retval on successful delete.
2074 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2075 struct i40e_aqc_remove_macvlan_element_data *list,
2076 int num_del, int *retval)
2078 struct i40e_hw *hw = &vsi->back->hw;
2082 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
2083 aq_err = hw->aq.asq_last_status;
2085 /* Explicitly ignore and do not report when firmware returns ENOENT */
2086 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
2088 dev_info(&vsi->back->pdev->dev,
2089 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2090 vsi_name, i40e_stat_str(hw, aq_ret),
2091 i40e_aq_str(hw, aq_err));
2096 * i40e_aqc_add_filters - Request firmware to add a set of filters
2097 * @vsi: ptr to the VSI
2098 * @vsi_name: name to display in messages
2099 * @list: the list of filters to send to firmware
2100 * @add_head: Position in the add hlist
2101 * @num_add: the number of filters to add
2103 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2104 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2105 * space for more filters.
2108 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2109 struct i40e_aqc_add_macvlan_element_data *list,
2110 struct i40e_new_mac_filter *add_head,
2113 struct i40e_hw *hw = &vsi->back->hw;
2116 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
2117 aq_err = hw->aq.asq_last_status;
2118 fcnt = i40e_update_filter_state(num_add, list, add_head);
2120 if (fcnt != num_add) {
2121 if (vsi->type == I40E_VSI_MAIN) {
2122 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2123 dev_warn(&vsi->back->pdev->dev,
2124 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2125 i40e_aq_str(hw, aq_err), vsi_name);
2126 } else if (vsi->type == I40E_VSI_SRIOV ||
2127 vsi->type == I40E_VSI_VMDQ1 ||
2128 vsi->type == I40E_VSI_VMDQ2) {
2129 dev_warn(&vsi->back->pdev->dev,
2130 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2131 i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
2133 dev_warn(&vsi->back->pdev->dev,
2134 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2135 i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
2141 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2142 * @vsi: pointer to the VSI
2143 * @vsi_name: the VSI name
2146 * This function sets or clears the promiscuous broadcast flags for VLAN
2147 * filters in order to properly receive broadcast frames. Assumes that only
2148 * broadcast filters are passed.
2150 * Returns status indicating success or failure.
2153 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2154 struct i40e_mac_filter *f)
2156 bool enable = f->state == I40E_FILTER_NEW;
2157 struct i40e_hw *hw = &vsi->back->hw;
2160 if (f->vlan == I40E_VLAN_ANY) {
2161 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2166 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2174 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2175 dev_warn(&vsi->back->pdev->dev,
2176 "Error %s, forcing overflow promiscuous on %s\n",
2177 i40e_aq_str(hw, hw->aq.asq_last_status),
2185 * i40e_set_promiscuous - set promiscuous mode
2186 * @pf: board private structure
2187 * @promisc: promisc on or off
2189 * There are different ways of setting promiscuous mode on a PF depending on
2190 * what state/environment we're in. This identifies and sets it appropriately.
2191 * Returns 0 on success.
2193 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2195 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2196 struct i40e_hw *hw = &pf->hw;
2199 if (vsi->type == I40E_VSI_MAIN &&
2200 pf->lan_veb != I40E_NO_VEB &&
2201 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2202 /* set defport ON for Main VSI instead of true promisc
2203 * this way we will get all unicast/multicast and VLAN
2204 * promisc behavior but will not get VF or VMDq traffic
2205 * replicated on the Main VSI.
2208 aq_ret = i40e_aq_set_default_vsi(hw,
2212 aq_ret = i40e_aq_clear_default_vsi(hw,
2216 dev_info(&pf->pdev->dev,
2217 "Set default VSI failed, err %s, aq_err %s\n",
2218 i40e_stat_str(hw, aq_ret),
2219 i40e_aq_str(hw, hw->aq.asq_last_status));
2222 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2228 dev_info(&pf->pdev->dev,
2229 "set unicast promisc failed, err %s, aq_err %s\n",
2230 i40e_stat_str(hw, aq_ret),
2231 i40e_aq_str(hw, hw->aq.asq_last_status));
2233 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2238 dev_info(&pf->pdev->dev,
2239 "set multicast promisc failed, err %s, aq_err %s\n",
2240 i40e_stat_str(hw, aq_ret),
2241 i40e_aq_str(hw, hw->aq.asq_last_status));
2246 pf->cur_promisc = promisc;
2252 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2253 * @vsi: ptr to the VSI
2255 * Push any outstanding VSI filter changes through the AdminQ.
2257 * Returns 0 or error value
2259 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2261 struct hlist_head tmp_add_list, tmp_del_list;
2262 struct i40e_mac_filter *f;
2263 struct i40e_new_mac_filter *new, *add_head = NULL;
2264 struct i40e_hw *hw = &vsi->back->hw;
2265 bool old_overflow, new_overflow;
2266 unsigned int failed_filters = 0;
2267 unsigned int vlan_filters = 0;
2268 char vsi_name[16] = "PF";
2269 int filter_list_len = 0;
2270 i40e_status aq_ret = 0;
2271 u32 changed_flags = 0;
2272 struct hlist_node *h;
2281 /* element array pointers; the buffers are kzalloc'd later as needed */
2282 struct i40e_aqc_add_macvlan_element_data *add_list;
2283 struct i40e_aqc_remove_macvlan_element_data *del_list;
2285 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2286 usleep_range(1000, 2000);
2289 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2292 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2293 vsi->current_netdev_flags = vsi->netdev->flags;
2296 INIT_HLIST_HEAD(&tmp_add_list);
2297 INIT_HLIST_HEAD(&tmp_del_list);
2299 if (vsi->type == I40E_VSI_SRIOV)
2300 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2301 else if (vsi->type != I40E_VSI_MAIN)
2302 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2304 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2305 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2307 spin_lock_bh(&vsi->mac_filter_hash_lock);
2308 /* Create a list of filters to delete. */
2309 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2310 if (f->state == I40E_FILTER_REMOVE) {
2311 /* Move the element into temporary del_list */
2312 hash_del(&f->hlist);
2313 hlist_add_head(&f->hlist, &tmp_del_list);
2315 /* Avoid counting removed filters */
2318 if (f->state == I40E_FILTER_NEW) {
2319 /* Create a temporary i40e_new_mac_filter */
2320 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2322 goto err_no_memory_locked;
2324 /* Store pointer to the real filter */
2326 new->state = f->state;
2328 /* Add it to the hash list */
2329 hlist_add_head(&new->hlist, &tmp_add_list);
2332 /* Count the number of active (current and new) VLAN
2333 * filters we have now. Does not count filters which
2334 * are marked for deletion.
2340 retval = i40e_correct_mac_vlan_filters(vsi,
2345 goto err_no_memory_locked;
2347 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2350 /* Now process 'del_list' outside the lock */
2351 if (!hlist_empty(&tmp_del_list)) {
2352 filter_list_len = hw->aq.asq_buf_size /
2353 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2354 list_size = filter_list_len *
2355 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2356 del_list = kzalloc(list_size, GFP_ATOMIC);
2360 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2363 /* handle broadcast filters by updating the broadcast
2364 * promiscuous flag and releasing the filter from the list.
2366 if (is_broadcast_ether_addr(f->macaddr)) {
2367 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2369 hlist_del(&f->hlist);
2374 /* add to delete list */
2375 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2376 if (f->vlan == I40E_VLAN_ANY) {
2377 del_list[num_del].vlan_tag = 0;
2378 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2380 del_list[num_del].vlan_tag =
2381 cpu_to_le16((u16)(f->vlan));
2384 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2385 del_list[num_del].flags = cmd_flags;
2388 /* flush a full buffer */
2389 if (num_del == filter_list_len) {
2390 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2392 memset(del_list, 0, list_size);
2395 /* Release memory for MAC filter entries which were
2396 * synced up with HW.
2398 hlist_del(&f->hlist);
2403 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2411 if (!hlist_empty(&tmp_add_list)) {
2412 /* Do all the adds now. */
2413 filter_list_len = hw->aq.asq_buf_size /
2414 sizeof(struct i40e_aqc_add_macvlan_element_data);
2415 list_size = filter_list_len *
2416 sizeof(struct i40e_aqc_add_macvlan_element_data);
2417 add_list = kzalloc(list_size, GFP_ATOMIC);
2422 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2423 /* handle broadcast filters by updating the broadcast
2424 * promiscuous flag instead of adding a MAC filter.
2426 if (is_broadcast_ether_addr(new->f->macaddr)) {
2427 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2429 new->state = I40E_FILTER_FAILED;
2431 new->state = I40E_FILTER_ACTIVE;
2435 /* add to add array */
2439 ether_addr_copy(add_list[num_add].mac_addr,
2441 if (new->f->vlan == I40E_VLAN_ANY) {
2442 add_list[num_add].vlan_tag = 0;
2443 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2445 add_list[num_add].vlan_tag =
2446 cpu_to_le16((u16)(new->f->vlan));
2448 add_list[num_add].queue_number = 0;
2449 /* set invalid match method for later detection */
2450 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2451 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2452 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2455 /* flush a full buffer */
2456 if (num_add == filter_list_len) {
2457 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2459 memset(add_list, 0, list_size);
2464 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2467 /* Now move all of the filters from the temp add list back to the VSI's list. */
2470 spin_lock_bh(&vsi->mac_filter_hash_lock);
2471 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2472 /* Only update the state if we're still NEW */
2473 if (new->f->state == I40E_FILTER_NEW)
2474 new->f->state = new->state;
2475 hlist_del(&new->hlist);
2478 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2483 /* Determine the number of active and failed filters. */
2484 spin_lock_bh(&vsi->mac_filter_hash_lock);
2485 vsi->active_filters = 0;
2486 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2487 if (f->state == I40E_FILTER_ACTIVE)
2488 vsi->active_filters++;
2489 else if (f->state == I40E_FILTER_FAILED)
2492 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2494 /* Check if we are able to exit overflow promiscuous mode. We can
2495 * safely exit if we didn't just enter, we no longer have any failed
2496 * filters, and we have reduced filters below the threshold value.
2498 if (old_overflow && !failed_filters &&
2499 vsi->active_filters < vsi->promisc_threshold) {
2500 dev_info(&pf->pdev->dev,
2501 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2503 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2504 vsi->promisc_threshold = 0;
2507 /* if the VF is not trusted, do not do promisc */
2508 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2509 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2513 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2515 /* If we are entering overflow promiscuous, we need to calculate a new
2516 * threshold for when we are safe to exit
2518 if (!old_overflow && new_overflow)
2519 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
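/* e.g. with 256 active filters at overflow time the threshold becomes
 * 192, and we stay in overflow promiscuous until fewer than 192
 * filters remain active (see the old_overflow check above)
 */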
2521 /* check for changes in promiscuous modes */
2522 if (changed_flags & IFF_ALLMULTI) {
2523 bool cur_multipromisc;
2525 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2526 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2531 retval = i40e_aq_rc_to_posix(aq_ret,
2532 hw->aq.asq_last_status);
2533 dev_info(&pf->pdev->dev,
2534 "set multi promisc failed on %s, err %s aq_err %s\n",
2536 i40e_stat_str(hw, aq_ret),
2537 i40e_aq_str(hw, hw->aq.asq_last_status));
2539 dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
2541 cur_multipromisc ? "entering" : "leaving");
2545 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2548 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2550 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2552 retval = i40e_aq_rc_to_posix(aq_ret,
2553 hw->aq.asq_last_status);
2554 dev_info(&pf->pdev->dev,
2555 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2556 cur_promisc ? "on" : "off",
2558 i40e_stat_str(hw, aq_ret),
2559 i40e_aq_str(hw, hw->aq.asq_last_status));
2563 /* if something went wrong then set the changed flag so we try again */
2565 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2567 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2571 /* Restore elements on the temporary add and delete lists */
2572 spin_lock_bh(&vsi->mac_filter_hash_lock);
2573 err_no_memory_locked:
2574 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2575 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2576 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2578 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2579 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
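/* Illustrative sketch (not driver code): a typical caller queues a
 * filter change under the hash lock and then pushes it to hardware,
 * either directly or via the service task:
 *
 *	spin_lock_bh(&vsi->mac_filter_hash_lock);
 *	i40e_add_mac_filter(vsi, addr);
 *	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 *	i40e_sync_vsi_filters(vsi);
 *
 * The VLAN helpers below instead use i40e_service_event_schedule() so
 * the sync happens asynchronously in the service task.
 */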
2584 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2585 * @pf: board private structure
2587 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2593 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2595 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
2596 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2600 for (v = 0; v < pf->num_alloc_vsi; v++) {
2602 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2603 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2606 /* come back and try again later */
2607 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2613 clear_bit(__I40E_VF_DISABLE, pf->state);
2617 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2620 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2622 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2623 return I40E_RXBUFFER_2048;
2625 return I40E_RXBUFFER_3072;
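/* This bound exists because XDP frames must fit in a single Rx buffer;
 * the value tracks the Rx buffer size selected in
 * i40e_vsi_configure_rx() below.
 */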
2629 * i40e_change_mtu - NDO callback to change the Maximum Transmission Unit
2630 * @netdev: network interface device structure
2631 * @new_mtu: new value for maximum frame size
2633 * Returns 0 on success, negative on failure
2635 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2637 struct i40e_netdev_priv *np = netdev_priv(netdev);
2638 struct i40e_vsi *vsi = np->vsi;
2639 struct i40e_pf *pf = vsi->back;
2641 if (i40e_enabled_xdp_vsi(vsi)) {
2642 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2644 if (frame_size > i40e_max_xdp_frame_size(vsi))
2648 netdev_dbg(netdev, "changing MTU from %d to %d\n",
2649 netdev->mtu, new_mtu);
2650 netdev->mtu = new_mtu;
2651 if (netif_running(netdev))
2652 i40e_vsi_reinit_locked(vsi);
2653 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2654 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2659 * i40e_ioctl - Access the hwtstamp interface
2660 * @netdev: network interface device structure
2661 * @ifr: interface request data
2662 * @cmd: ioctl command
2664 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2666 struct i40e_netdev_priv *np = netdev_priv(netdev);
2667 struct i40e_pf *pf = np->vsi->back;
2671 return i40e_ptp_get_ts_config(pf, ifr);
2673 return i40e_ptp_set_ts_config(pf, ifr);
2680 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2681 * @vsi: the vsi being adjusted
2683 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2685 struct i40e_vsi_context ctxt;
2688 /* Don't modify stripping options if a port VLAN is active */
2692 if ((vsi->info.valid_sections &
2693 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2694 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2695 return; /* already enabled */
2697 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2698 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2699 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2701 ctxt.seid = vsi->seid;
2702 ctxt.info = vsi->info;
2703 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2705 dev_info(&vsi->back->pdev->dev,
2706 "update vlan stripping failed, err %s aq_err %s\n",
2707 i40e_stat_str(&vsi->back->hw, ret),
2708 i40e_aq_str(&vsi->back->hw,
2709 vsi->back->hw.aq.asq_last_status));
2714 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2715 * @vsi: the vsi being adjusted
2717 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2719 struct i40e_vsi_context ctxt;
2722 /* Don't modify stripping options if a port VLAN is active */
2726 if ((vsi->info.valid_sections &
2727 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2728 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2729 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2730 return; /* already disabled */
2732 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2733 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2734 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2736 ctxt.seid = vsi->seid;
2737 ctxt.info = vsi->info;
2738 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2740 dev_info(&vsi->back->pdev->dev,
2741 "update vlan stripping failed, err %s aq_err %s\n",
2742 i40e_stat_str(&vsi->back->hw, ret),
2743 i40e_aq_str(&vsi->back->hw,
2744 vsi->back->hw.aq.asq_last_status));
2749 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2750 * @vsi: the vsi being configured
2751 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2753 * This is a helper function for adding a new MAC/VLAN filter with the
2754 * specified VLAN for each existing MAC address already in the hash table.
2755 * This function does *not* perform any accounting to update filters based on
2758 * NOTE: this function expects to be called while under the
2759 * mac_filter_hash_lock
2761 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2763 struct i40e_mac_filter *f, *add_f;
2764 struct hlist_node *h;
2767 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2768 if (f->state == I40E_FILTER_REMOVE)
2770 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2772 dev_info(&vsi->back->pdev->dev,
2773 "Could not add vlan filter %d for %pM\n",
2783 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2784 * @vsi: the VSI being configured
2785 * @vid: VLAN id to be added
2787 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2794 /* The network stack will attempt to add VID=0, with the intention to
2795 * receive priority tagged packets with a VLAN of 0. Our HW receives
2796 * these packets by default when configured to receive untagged
2797 * packets, so we don't need to add a filter for this case.
2798 * Additionally, HW interprets adding a VID=0 filter as meaning to
2799 * receive *only* tagged traffic and stops receiving untagged traffic.
2800 * Thus, we do not want to actually add a filter for VID=0
2805 /* Lock once because all functions invoked below iterate the list */
2806 spin_lock_bh(&vsi->mac_filter_hash_lock);
2807 err = i40e_add_vlan_all_mac(vsi, vid);
2808 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2812 /* schedule our worker thread which will take care of
2813 * applying the new filter changes
2815 i40e_service_event_schedule(vsi->back);
2820 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2821 * @vsi: the vsi being configured
2822 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2824 * This function should be used to remove all VLAN filters which match the
2825 * given VID. It does not schedule the service event and does not take the
2826 * mac_filter_hash_lock so it may be combined with other operations under
2827 * a single invocation of the mac_filter_hash_lock.
2829 * NOTE: this function expects to be called while under the
2830 * mac_filter_hash_lock
2832 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2834 struct i40e_mac_filter *f;
2835 struct hlist_node *h;
2838 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2840 __i40e_del_filter(vsi, f);
2845 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2846 * @vsi: the VSI being configured
2847 * @vid: VLAN id to be removed
2849 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2851 if (!vid || vsi->info.pvid)
2854 spin_lock_bh(&vsi->mac_filter_hash_lock);
2855 i40e_rm_vlan_all_mac(vsi, vid);
2856 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2858 /* schedule our worker thread which will take care of
2859 * applying the new filter changes
2861 i40e_service_event_schedule(vsi->back);
2865 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2866 * @netdev: network interface to be adjusted
2867 * @proto: unused protocol value
2868 * @vid: vlan id to be added
2870 * net_device_ops implementation for adding vlan ids
2872 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2873 __always_unused __be16 proto, u16 vid)
2875 struct i40e_netdev_priv *np = netdev_priv(netdev);
2876 struct i40e_vsi *vsi = np->vsi;
2879 if (vid >= VLAN_N_VID)
2882 ret = i40e_vsi_add_vlan(vsi, vid);
2884 set_bit(vid, vsi->active_vlans);
2890 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
2891 * @netdev: network interface to be adjusted
2892 * @proto: unused protocol value
2893 * @vid: vlan id to be added
2895 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2896 __always_unused __be16 proto, u16 vid)
2898 struct i40e_netdev_priv *np = netdev_priv(netdev);
2899 struct i40e_vsi *vsi = np->vsi;
2901 if (vid >= VLAN_N_VID)
2903 set_bit(vid, vsi->active_vlans);
2907 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2908 * @netdev: network interface to be adjusted
2909 * @proto: unused protocol value
2910 * @vid: vlan id to be removed
2912 * net_device_ops implementation for removing vlan ids
2914 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2915 __always_unused __be16 proto, u16 vid)
2917 struct i40e_netdev_priv *np = netdev_priv(netdev);
2918 struct i40e_vsi *vsi = np->vsi;
2920 /* return code is ignored as there is nothing a user
2921 * can do about failure to remove and a log message was
2922 * already printed from the other function
2924 i40e_vsi_kill_vlan(vsi, vid);
2926 clear_bit(vid, vsi->active_vlans);
2932 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2933 * @vsi: the vsi being brought back up
2935 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2942 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2943 i40e_vlan_stripping_enable(vsi);
2945 i40e_vlan_stripping_disable(vsi);
2947 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2948 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
2953 * i40e_vsi_add_pvid - Add pvid for the VSI
2954 * @vsi: the vsi being adjusted
2955 * @vid: the vlan id to set as a PVID
2957 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2959 struct i40e_vsi_context ctxt;
2962 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2963 vsi->info.pvid = cpu_to_le16(vid);
2964 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2965 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2966 I40E_AQ_VSI_PVLAN_EMOD_STR;
2968 ctxt.seid = vsi->seid;
2969 ctxt.info = vsi->info;
2970 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2972 dev_info(&vsi->back->pdev->dev,
2973 "add pvid failed, err %s aq_err %s\n",
2974 i40e_stat_str(&vsi->back->hw, ret),
2975 i40e_aq_str(&vsi->back->hw,
2976 vsi->back->hw.aq.asq_last_status));
2984 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2985 * @vsi: the vsi being adjusted
2987 * Just disable VLAN stripping and clear the pvid to return to normal
2989 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2993 i40e_vlan_stripping_disable(vsi);
2997 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2998 * @vsi: ptr to the VSI
3000 * If this function returns with an error, then it's possible one or
3001 * more of the rings is populated (while the rest are not). It is the
3002 * caller's duty to clean those orphaned rings.
3004 * Return 0 on success, negative on failure
3006 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3010 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3011 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3013 if (!i40e_enabled_xdp_vsi(vsi))
3016 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3017 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3023 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3024 * @vsi: ptr to the VSI
3026 * Free VSI's transmit software resources
3028 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3032 if (vsi->tx_rings) {
3033 for (i = 0; i < vsi->num_queue_pairs; i++)
3034 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3035 i40e_free_tx_resources(vsi->tx_rings[i]);
3038 if (vsi->xdp_rings) {
3039 for (i = 0; i < vsi->num_queue_pairs; i++)
3040 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3041 i40e_free_tx_resources(vsi->xdp_rings[i]);
3046 * i40e_vsi_setup_rx_resources - Allocate VSI Rx queue resources
3047 * @vsi: ptr to the VSI
3049 * If this function returns with an error, then it's possible one or
3050 * more of the rings is populated (while the rest are not). It is the
3051 * caller's duty to clean those orphaned rings.
3053 * Return 0 on success, negative on failure
3055 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3059 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3060 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3065 * i40e_vsi_free_rx_resources - Free Rx resources for VSI queues
3066 * @vsi: ptr to the VSI
3068 * Free all receive software resources
3070 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3077 for (i = 0; i < vsi->num_queue_pairs; i++)
3078 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3079 i40e_free_rx_resources(vsi->rx_rings[i]);
3083 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3084 * @ring: The Tx ring to configure
3086 * This enables/disables XPS for a given Tx descriptor ring
3087 * based on the TCs enabled for the VSI that ring belongs to.
3089 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3093 if (!ring->q_vector || !ring->netdev || ring->ch)
3096 /* We only initialize XPS once, so as not to overwrite user settings */
3097 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3100 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3101 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3106 * i40e_xsk_umem - Retrieve the AF_XDP UMEM if XDP and ZC are enabled
3107 * @ring: The Tx or Rx ring
3109 * Returns the UMEM or NULL.
3111 static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
3113 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3114 int qid = ring->queue_index;
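/* XDP Tx rings are numbered after the regular queue pairs, so map the
 * ring index back to the queue id the UMEM was bound to
 */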
3116 if (ring_is_xdp(ring))
3117 qid -= ring->vsi->alloc_queue_pairs;
3119 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3122 return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
3126 * i40e_configure_tx_ring - Configure a transmit ring context and related registers
3127 * @ring: The Tx ring to configure
3129 * Configure the Tx descriptor ring in the HMC context.
3131 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3133 struct i40e_vsi *vsi = ring->vsi;
3134 u16 pf_q = vsi->base_queue + ring->queue_index;
3135 struct i40e_hw *hw = &vsi->back->hw;
3136 struct i40e_hmc_obj_txq tx_ctx;
3137 i40e_status err = 0;
3140 if (ring_is_xdp(ring))
3141 ring->xsk_umem = i40e_xsk_umem(ring);
3143 /* some ATR-related Tx ring init */
3144 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3145 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3146 ring->atr_count = 0;
3148 ring->atr_sample_rate = 0;
3152 i40e_config_xps_tx_ring(ring);
3154 /* clear the context structure first */
3155 memset(&tx_ctx, 0, sizeof(tx_ctx));
3157 tx_ctx.new_context = 1;
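/* the HMC context takes the ring base address in 128-byte units */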
3158 tx_ctx.base = (ring->dma / 128);
3159 tx_ctx.qlen = ring->count;
3160 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3161 I40E_FLAG_FD_ATR_ENABLED));
3162 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3163 /* FDIR VSI tx ring can still use RS bit and writebacks */
3164 if (vsi->type != I40E_VSI_FDIR)
3165 tx_ctx.head_wb_ena = 1;
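/* head writeback lands in the memory just past the last descriptor */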
3166 tx_ctx.head_wb_addr = ring->dma +
3167 (ring->count * sizeof(struct i40e_tx_desc));
3169 /* As part of VSI creation/update, FW allocates certain
3170 * Tx arbitration queue sets for each TC enabled for
3171 * the VSI. The FW returns the handles to these queue
3172 * sets as part of the response buffer to Add VSI,
3173 * Update VSI, etc. AQ commands. It is expected that
3174 * these queue set handles be associated with the Tx
3175 * queues by the driver as part of the TX queue context
3176 * initialization. This has to be done regardless of
3177 * DCB as by default everything is mapped to TC0.
3182 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3185 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3187 tx_ctx.rdylist_act = 0;
3189 /* clear the context in the HMC */
3190 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3192 dev_info(&vsi->back->pdev->dev,
3193 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3194 ring->queue_index, pf_q, err);
3198 /* set the context in the HMC */
3199 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3201 dev_info(&vsi->back->pdev->dev,
3202 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3203 ring->queue_index, pf_q, err);
3207 /* Now associate this queue with this PCI function */
3209 if (ring->ch->type == I40E_VSI_VMDQ2)
3210 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3214 qtx_ctl |= (ring->ch->vsi_number <<
3215 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3216 I40E_QTX_CTL_VFVM_INDX_MASK;
3218 if (vsi->type == I40E_VSI_VMDQ2) {
3219 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3220 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3221 I40E_QTX_CTL_VFVM_INDX_MASK;
3223 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3227 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3228 I40E_QTX_CTL_PF_INDX_MASK);
3229 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3232 /* cache the tail register address for easier writes later */
3233 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3239 * i40e_configure_rx_ring - Configure a receive ring context
3240 * @ring: The Rx ring to configure
3242 * Configure the Rx descriptor ring in the HMC context.
3244 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3246 struct i40e_vsi *vsi = ring->vsi;
3247 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3248 u16 pf_q = vsi->base_queue + ring->queue_index;
3249 struct i40e_hw *hw = &vsi->back->hw;
3250 struct i40e_hmc_obj_rxq rx_ctx;
3251 i40e_status err = 0;
3255 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3257 /* clear the context structure first */
3258 memset(&rx_ctx, 0, sizeof(rx_ctx));
3260 if (ring->vsi->type == I40E_VSI_MAIN)
3261 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3263 ring->xsk_umem = i40e_xsk_umem(ring);
3264 if (ring->xsk_umem) {
3265 ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
3266 XDP_PACKET_HEADROOM;
3267 /* For AF_XDP ZC, we disallow packets to span
3268 * multiple buffers, thus letting us skip that
3269 * handling in the fast-path.
3272 ring->zca.free = i40e_zca_free;
3273 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3278 dev_info(&vsi->back->pdev->dev,
3279 "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
3283 ring->rx_buf_len = vsi->rx_buf_len;
3284 if (ring->vsi->type == I40E_VSI_MAIN) {
3285 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3286 MEM_TYPE_PAGE_SHARED,
3293 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3294 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
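/* dbuff above is the Rx buffer length in 128-byte units
 * (1 << I40E_RXQ_CTX_DBUFF_SHIFT); the base address below is
 * likewise in 128-byte units
 */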
3296 rx_ctx.base = (ring->dma / 128);
3297 rx_ctx.qlen = ring->count;
3299 /* use 32 byte descriptors */
3302 /* descriptor type is always zero
3305 rx_ctx.hsplit_0 = 0;
3307 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3308 if (hw->revision_id == 0)
3309 rx_ctx.lrxqthresh = 0;
3311 rx_ctx.lrxqthresh = 1;
3312 rx_ctx.crcstrip = 1;
3314 /* this controls whether VLAN is stripped from inner headers */
3316 /* set the prefena field to 1 because the manual says to */
3319 /* clear the context in the HMC */
3320 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3322 dev_info(&vsi->back->pdev->dev,
3323 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3324 ring->queue_index, pf_q, err);
3328 /* set the context in the HMC */
3329 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3331 dev_info(&vsi->back->pdev->dev,
3332 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3333 ring->queue_index, pf_q, err);
3337 /* configure Rx buffer alignment */
3338 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3339 clear_ring_build_skb_enabled(ring);
3341 set_ring_build_skb_enabled(ring);
3343 /* cache tail for quicker writes, and clear the reg before use */
3344 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3345 writel(0, ring->tail);
3347 ok = ring->xsk_umem ?
3348 i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
3349 !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
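/* note the inverted polarity: the ZC allocator returns true on
 * success while the regular allocator returns true on failure,
 * hence the '!' on the regular path
 */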
3351 /* Log this in case the user has forgotten to give the kernel
3352 * any buffers, even later in the application.
3354 dev_info(&vsi->back->pdev->dev,
3355 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3356 ring->xsk_umem ? "UMEM enabled " : "",
3357 ring->queue_index, pf_q);
3364 * i40e_vsi_configure_tx - Configure the VSI for Tx
3365 * @vsi: VSI structure describing this set of rings and resources
3367 * Configure the Tx VSI for operation.
3369 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3374 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3375 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3377 if (err || !i40e_enabled_xdp_vsi(vsi))
3380 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3381 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3387 * i40e_vsi_configure_rx - Configure the VSI for Rx
3388 * @vsi: the VSI being configured
3390 * Configure the Rx VSI for operation.
3392 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3397 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3398 vsi->max_frame = I40E_MAX_RXBUFFER;
3399 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3400 #if (PAGE_SIZE < 8192)
3401 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3402 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3403 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3404 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3407 vsi->max_frame = I40E_MAX_RXBUFFER;
3408 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3412 /* set up individual rings */
3413 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3414 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3420 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3421 * @vsi: ptr to the VSI
3423 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3425 struct i40e_ring *tx_ring, *rx_ring;
3426 u16 qoffset, qcount;
3429 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3430 /* Reset the TC information */
3431 for (i = 0; i < vsi->num_queue_pairs; i++) {
3432 rx_ring = vsi->rx_rings[i];
3433 tx_ring = vsi->tx_rings[i];
3434 rx_ring->dcb_tc = 0;
3435 tx_ring->dcb_tc = 0;
3440 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3441 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3444 qoffset = vsi->tc_config.tc_info[n].qoffset;
3445 qcount = vsi->tc_config.tc_info[n].qcount;
3446 for (i = qoffset; i < (qoffset + qcount); i++) {
3447 rx_ring = vsi->rx_rings[i];
3448 tx_ring = vsi->tx_rings[i];
3449 rx_ring->dcb_tc = n;
3450 tx_ring->dcb_tc = n;
3456 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3457 * @vsi: ptr to the VSI
3459 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3462 i40e_set_rx_mode(vsi->netdev);
3466 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3467 * @vsi: Pointer to the targeted VSI
3469 * This function replays the hlist on the hw where all the SB Flow Director
3470 * filters were saved.
3472 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3474 struct i40e_fdir_filter *filter;
3475 struct i40e_pf *pf = vsi->back;
3476 struct hlist_node *node;
3478 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3481 /* Reset FDir counters as we're replaying all existing filters */
3482 pf->fd_tcp4_filter_cnt = 0;
3483 pf->fd_udp4_filter_cnt = 0;
3484 pf->fd_sctp4_filter_cnt = 0;
3485 pf->fd_ip4_filter_cnt = 0;
3487 hlist_for_each_entry_safe(filter, node,
3488 &pf->fdir_filter_list, fdir_node) {
3489 i40e_add_del_fdir(vsi, filter, true);
3494 * i40e_vsi_configure - Set up the VSI for action
3495 * @vsi: the VSI being configured
3497 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3501 i40e_set_vsi_rx_mode(vsi);
3502 i40e_restore_vlan(vsi);
3503 i40e_vsi_config_dcb_rings(vsi);
3504 err = i40e_vsi_configure_tx(vsi);
3506 err = i40e_vsi_configure_rx(vsi);
3512 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3513 * @vsi: the VSI being configured
3515 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3517 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3518 struct i40e_pf *pf = vsi->back;
3519 struct i40e_hw *hw = &pf->hw;
3524 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3525 * and PFINT_LNKLSTn registers, e.g.:
3526 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3528 qp = vsi->base_queue;
3529 vector = vsi->base_vector;
3530 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3531 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3533 q_vector->rx.next_update = jiffies + 1;
3534 q_vector->rx.target_itr =
3535 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
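/* ITR registers are in 2 usec units, hence the >> 1 below */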
3536 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3537 q_vector->rx.target_itr >> 1);
3538 q_vector->rx.current_itr = q_vector->rx.target_itr;
3540 q_vector->tx.next_update = jiffies + 1;
3541 q_vector->tx.target_itr =
3542 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3543 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3544 q_vector->tx.target_itr >> 1);
3545 q_vector->tx.current_itr = q_vector->tx.target_itr;
3547 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3548 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3550 /* Linked list for the queue pairs assigned to this vector */
3551 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3552 for (q = 0; q < q_vector->num_ringpairs; q++) {
3553 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
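/* chain the interrupt causes Rx -> (XDP Tx ->) Tx -> next Rx;
 * the final Tx entry terminates the list below
 */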
3556 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3557 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3558 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3559 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3560 (I40E_QUEUE_TYPE_TX <<
3561 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3563 wr32(hw, I40E_QINT_RQCTL(qp), val);
3566 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3567 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3568 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3569 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3570 (I40E_QUEUE_TYPE_TX <<
3571 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3573 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3576 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3577 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3578 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3579 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3580 (I40E_QUEUE_TYPE_RX <<
3581 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3583 /* Terminate the linked list */
3584 if (q == (q_vector->num_ringpairs - 1))
3585 val |= (I40E_QUEUE_END_OF_LIST <<
3586 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3588 wr32(hw, I40E_QINT_TQCTL(qp), val);
3597 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3598 * @pf: pointer to private device data structure
3600 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3602 struct i40e_hw *hw = &pf->hw;
3605 /* clear things first */
3606 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3607 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3609 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3610 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3611 I40E_PFINT_ICR0_ENA_GRST_MASK |
3612 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3613 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3614 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3615 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3616 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3618 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3619 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3621 if (pf->flags & I40E_FLAG_PTP)
3622 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3624 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3626 /* SW_ITR_IDX = 0, but don't change INTENA */
3627 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3628 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3630 /* OTHER_ITR_IDX = 0 */
3631 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3635 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3636 * @vsi: the VSI being configured
3638 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3640 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3641 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3642 struct i40e_pf *pf = vsi->back;
3643 struct i40e_hw *hw = &pf->hw;
3646 /* set the ITR configuration */
3647 q_vector->rx.next_update = jiffies + 1;
3648 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3649 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
3650 q_vector->rx.current_itr = q_vector->rx.target_itr;
3651 q_vector->tx.next_update = jiffies + 1;
3652 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3653 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
3654 q_vector->tx.current_itr = q_vector->tx.target_itr;
3656 i40e_enable_misc_int_causes(pf);
3658 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3659 wr32(hw, I40E_PFINT_LNKLST0, 0);
3661 /* Associate the queue pair to the vector and enable the queue int */
3662 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3663 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3664 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3665 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3667 wr32(hw, I40E_QINT_RQCTL(0), val);
3669 if (i40e_enabled_xdp_vsi(vsi)) {
3670 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3671 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3673 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3675 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3678 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3679 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3680 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3682 wr32(hw, I40E_QINT_TQCTL(0), val);
3687 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3688 * @pf: board private structure
3690 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3692 struct i40e_hw *hw = &pf->hw;
3694 wr32(hw, I40E_PFINT_DYN_CTL0,
3695 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3700 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3701 * @pf: board private structure
3703 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3705 struct i40e_hw *hw = &pf->hw;
3708 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3709 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3710 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3712 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3717 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3718 * @irq: interrupt number
3719 * @data: pointer to a q_vector
3721 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3723 struct i40e_q_vector *q_vector = data;
3725 if (!q_vector->tx.ring && !q_vector->rx.ring)
3728 napi_schedule_irqoff(&q_vector->napi);
3734 * i40e_irq_affinity_notify - Callback for affinity changes
3735 * @notify: context as to what irq was changed
3736 * @mask: the new affinity mask
3738 * This is a callback function used by the irq_set_affinity_notifier function
3739 * so that we may register to receive changes to the irq affinity masks.
3741 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3742 const cpumask_t *mask)
3744 struct i40e_q_vector *q_vector =
3745 container_of(notify, struct i40e_q_vector, affinity_notify);
3747 cpumask_copy(&q_vector->affinity_mask, mask);
3751 * i40e_irq_affinity_release - Callback for affinity notifier release
3752 * @ref: internal core kernel usage
3754 * This is a callback function used by the irq_set_affinity_notifier function
3755 * to inform the current notification subscriber that they will no longer
3756 * receive notifications.
3758 static void i40e_irq_affinity_release(struct kref *ref) {}
3761 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3762 * @vsi: the VSI being configured
3763 * @basename: name for the vector
3765 * Allocates MSI-X vectors and requests interrupts from the kernel.
3767 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3769 int q_vectors = vsi->num_q_vectors;
3770 struct i40e_pf *pf = vsi->back;
3771 int base = vsi->base_vector;
3778 for (vector = 0; vector < q_vectors; vector++) {
3779 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3781 irq_num = pf->msix_entries[base + vector].vector;
3783 if (q_vector->tx.ring && q_vector->rx.ring) {
3784 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3785 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3787 } else if (q_vector->rx.ring) {
3788 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3789 "%s-%s-%d", basename, "rx", rx_int_idx++);
3790 } else if (q_vector->tx.ring) {
3791 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3792 "%s-%s-%d", basename, "tx", tx_int_idx++);
3794 /* skip this unused q_vector */
3797 err = request_irq(irq_num,
3803 dev_info(&pf->pdev->dev,
3804 "MSIX request_irq failed, error: %d\n", err);
3805 goto free_queue_irqs;
3808 /* register for affinity change notifications */
3809 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3810 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3811 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3812 /* Spread affinity hints out across online CPUs.
3814 * get_cpu_mask returns a static constant mask with
3815 * a permanent lifetime so it's ok to pass to
3816 * irq_set_affinity_hint without making a copy.
3818 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3819 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3822 vsi->irqs_ready = true;
3828 irq_num = pf->msix_entries[base + vector].vector;
3829 irq_set_affinity_notifier(irq_num, NULL);
3830 irq_set_affinity_hint(irq_num, NULL);
3831 free_irq(irq_num, &vsi->q_vectors[vector]);
3837 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3838 * @vsi: the VSI being un-configured
3840 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3842 struct i40e_pf *pf = vsi->back;
3843 struct i40e_hw *hw = &pf->hw;
3844 int base = vsi->base_vector;
3847 /* disable interrupt causation from each queue */
3848 for (i = 0; i < vsi->num_queue_pairs; i++) {
3851 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3852 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3853 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3855 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3856 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3857 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3859 if (!i40e_enabled_xdp_vsi(vsi))
3861 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3864 /* disable each interrupt */
3865 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3866 for (i = vsi->base_vector;
3867 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3868 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3871 for (i = 0; i < vsi->num_q_vectors; i++)
3872 synchronize_irq(pf->msix_entries[i + base].vector);
3874 /* Legacy and MSI mode - this stops all interrupt handling */
3875 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3876 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3878 synchronize_irq(pf->pdev->irq);
3883 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3884 * @vsi: the VSI being configured
3886 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3888 struct i40e_pf *pf = vsi->back;
3891 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3892 for (i = 0; i < vsi->num_q_vectors; i++)
3893 i40e_irq_dynamic_enable(vsi, i);
3895 i40e_irq_dynamic_enable_icr0(pf);
3898 i40e_flush(&pf->hw);
3903 * i40e_free_misc_vector - Free the vector that handles non-queue events
3904 * @pf: board private structure
3906 static void i40e_free_misc_vector(struct i40e_pf *pf)
3909 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3910 i40e_flush(&pf->hw);
3912 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3913 synchronize_irq(pf->msix_entries[0].vector);
3914 free_irq(pf->msix_entries[0].vector, pf);
3915 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3920 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3921 * @irq: interrupt number
3922 * @data: pointer to a q_vector
3924 * This is the handler used for all MSI/Legacy interrupts, and deals
3925 * with both queue and non-queue interrupts. This is also used in
3926 * MSIX mode to handle the non-queue interrupts.
3928 static irqreturn_t i40e_intr(int irq, void *data)
3930 struct i40e_pf *pf = (struct i40e_pf *)data;
3931 struct i40e_hw *hw = &pf->hw;
3932 irqreturn_t ret = IRQ_NONE;
3933 u32 icr0, icr0_remaining;
3936 icr0 = rd32(hw, I40E_PFINT_ICR0);
3937 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3939 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3940 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3943 /* if interrupt but no bits showing, must be SWINT */
3944 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3945 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3948 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3949 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3950 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3951 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3952 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3955 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3956 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3957 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3958 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3960 /* We do not have a way to disarm Queue causes while leaving
3961 * the interrupt enabled for all other causes; ideally the
3962 * interrupt would be disabled while we are in NAPI, but
3963 * this is not a performance path and napi_schedule()
3964 * can deal with rescheduling.
3966 if (!test_bit(__I40E_DOWN, pf->state))
3967 napi_schedule_irqoff(&q_vector->napi);
3970 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3971 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3972 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
3973 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3976 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3977 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3978 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
3981 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3982 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3983 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3986 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3987 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
3988 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
3989 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3990 val = rd32(hw, I40E_GLGEN_RSTAT);
3991 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3992 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3993 if (val == I40E_RESET_CORER) {
3995 } else if (val == I40E_RESET_GLOBR) {
3997 } else if (val == I40E_RESET_EMPR) {
3999 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4003 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4004 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4005 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4006 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4007 rd32(hw, I40E_PFHMC_ERRORINFO),
4008 rd32(hw, I40E_PFHMC_ERRORDATA));
4011 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4012 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4014 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
4015 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4016 i40e_ptp_tx_hwtstamp(pf);
4020 /* If a critical error is pending we have no choice but to reset the device.
4022 * Report and mask out any remaining unexpected interrupts.
4024 icr0_remaining = icr0 & ena_mask;
4025 if (icr0_remaining) {
4026 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4028 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4029 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4030 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4031 dev_info(&pf->pdev->dev, "device will be reset\n");
4032 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4033 i40e_service_event_schedule(pf);
4035 ena_mask &= ~icr0_remaining;
4040 /* re-enable interrupt causes */
4041 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4042 if (!test_bit(__I40E_DOWN, pf->state) ||
4043 test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4044 i40e_service_event_schedule(pf);
4045 i40e_irq_dynamic_enable_icr0(pf);
4052 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4053 * @tx_ring: tx ring to clean
4054 * @budget: how many cleans we're allowed
4056 * Returns true if there's any budget left (i.e. the clean is finished)
4058 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4060 struct i40e_vsi *vsi = tx_ring->vsi;
4061 u16 i = tx_ring->next_to_clean;
4062 struct i40e_tx_buffer *tx_buf;
4063 struct i40e_tx_desc *tx_desc;
4065 tx_buf = &tx_ring->tx_bi[i];
4066 tx_desc = I40E_TX_DESC(tx_ring, i);
4067 i -= tx_ring->count;
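/* keep the index biased by -count so ring wraparound is detected
 * when it reaches zero rather than comparing against count each pass
 */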
4069 do {
4070 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4072 /* if next_to_watch is not set then there is no work pending */
4073 if (!eop_desc)
4074 break;
4076 /* prevent any other reads prior to eop_desc */
4077 smp_rmb();
4079 /* if the descriptor isn't done, no work yet to do */
4080 if (!(eop_desc->cmd_type_offset_bsz &
4081 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4082 break;
4084 /* clear next_to_watch to prevent false hangs */
4085 tx_buf->next_to_watch = NULL;
4087 tx_desc->buffer_addr = 0;
4088 tx_desc->cmd_type_offset_bsz = 0;
4089 /* move past filter desc */
4090 tx_buf++;
4091 tx_desc++;
4092 i++;
4093 if (unlikely(!i)) {
4094 i -= tx_ring->count;
4095 tx_buf = tx_ring->tx_bi;
4096 tx_desc = I40E_TX_DESC(tx_ring, 0);
4097 }
4098 /* unmap skb header data */
4099 dma_unmap_single(tx_ring->dev,
4100 dma_unmap_addr(tx_buf, dma),
4101 dma_unmap_len(tx_buf, len),
4102 DMA_TO_DEVICE);
4103 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4104 kfree(tx_buf->raw_buf);
4106 tx_buf->raw_buf = NULL;
4107 tx_buf->tx_flags = 0;
4108 tx_buf->next_to_watch = NULL;
4109 dma_unmap_len_set(tx_buf, len, 0);
4110 tx_desc->buffer_addr = 0;
4111 tx_desc->cmd_type_offset_bsz = 0;
4113 /* move us past the eop_desc for start of next FD desc */
4114 tx_buf++;
4115 tx_desc++;
4116 i++;
4117 if (unlikely(!i)) {
4118 i -= tx_ring->count;
4119 tx_buf = tx_ring->tx_bi;
4120 tx_desc = I40E_TX_DESC(tx_ring, 0);
4121 }
4123 /* update budget accounting */
4124 budget--;
4125 } while (likely(budget));
4127 i += tx_ring->count;
4128 tx_ring->next_to_clean = i;
4130 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4131 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4133 return budget > 0;
4134 }
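/* Index arithmetic sketch (assumed values): with tx_ring->count = 512
 * and next_to_clean = 5, i starts at 5 - 512 = -507. Each cleaned
 * descriptor increments i, and when i hits zero the unlikely(!i) blocks
 * rewind it by another 512 while pointing tx_buf/tx_desc back at the
 * ring base. The final i += tx_ring->count converts the negative running
 * index back into a valid next_to_clean.
 */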
4137 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4138 * @irq: interrupt number
4139 * @data: pointer to a q_vector
4141 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4143 struct i40e_q_vector *q_vector = data;
4144 struct i40e_vsi *vsi;
4146 if (!q_vector->tx.ring)
4147 return IRQ_HANDLED;
4149 vsi = q_vector->tx.ring->vsi;
4150 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4152 return IRQ_HANDLED;
4153 }
4156 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4157 * @vsi: the VSI being configured
4158 * @v_idx: vector index
4159 * @qp_idx: queue pair index
4161 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4163 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4164 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4165 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4167 tx_ring->q_vector = q_vector;
4168 tx_ring->next = q_vector->tx.ring;
4169 q_vector->tx.ring = tx_ring;
4170 q_vector->tx.count++;
4172 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4173 if (i40e_enabled_xdp_vsi(vsi)) {
4174 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4176 xdp_ring->q_vector = q_vector;
4177 xdp_ring->next = q_vector->tx.ring;
4178 q_vector->tx.ring = xdp_ring;
4179 q_vector->tx.count++;
4182 rx_ring->q_vector = q_vector;
4183 rx_ring->next = q_vector->rx.ring;
4184 q_vector->rx.ring = rx_ring;
4185 q_vector->rx.count++;
4189 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4190 * @vsi: the VSI being configured
4192 * This function maps descriptor rings to the queue-specific vectors
4193 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4194 * one vector per queue pair, but on a constrained vector budget, we
4195 * group the queue pairs as "efficiently" as possible.
4197 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4199 int qp_remaining = vsi->num_queue_pairs;
4200 int q_vectors = vsi->num_q_vectors;
4201 int num_ringpairs;
4202 int v_start = 0;
4203 int qp_idx = 0;
4205 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4206 * group them so there are multiple queues per vector.
4207 * It is also important to go through all the vectors available to be
4208 * sure that, if we don't use all the vectors, the remaining vectors
4209 * are cleared. This is especially important when decreasing the
4210 * number of queues in use.
4211 */
4212 for (; v_start < q_vectors; v_start++) {
4213 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4215 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4217 q_vector->num_ringpairs = num_ringpairs;
4218 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4220 q_vector->rx.count = 0;
4221 q_vector->tx.count = 0;
4222 q_vector->rx.ring = NULL;
4223 q_vector->tx.ring = NULL;
4225 while (num_ringpairs--) {
4226 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4227 qp_idx++;
4228 qp_remaining--;
4229 }
4230 }
4231 }
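/* Distribution example (illustrative): 10 queue pairs on 4 vectors map
 * as DIV_ROUND_UP(10, 4) = 3, DIV_ROUND_UP(7, 3) = 3,
 * DIV_ROUND_UP(4, 2) = 2 and DIV_ROUND_UP(2, 1) = 2, i.e. the vectors
 * get 3/3/2/2 ring pairs and qp_remaining reaches exactly zero on the
 * last vector.
 */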
4234 * i40e_vsi_request_irq - Request IRQ from the OS
4235 * @vsi: the VSI being configured
4236 * @basename: name for the vector
4238 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4240 struct i40e_pf *pf = vsi->back;
4243 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4244 err = i40e_vsi_request_irq_msix(vsi, basename);
4245 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4246 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4247 pf->int_name, pf);
4248 else
4249 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4250 pf->int_name, pf);
4252 if (err)
4253 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4255 return err;
4256 }
4258 #ifdef CONFIG_NET_POLL_CONTROLLER
4260 * i40e_netpoll - A Polling 'interrupt' handler
4261 * @netdev: network interface device structure
4263 * This is used by netconsole to send skbs without having to re-enable
4264 * interrupts. It's not called while the normal interrupt routine is executing.
4266 static void i40e_netpoll(struct net_device *netdev)
4268 struct i40e_netdev_priv *np = netdev_priv(netdev);
4269 struct i40e_vsi *vsi = np->vsi;
4270 struct i40e_pf *pf = vsi->back;
4271 int i;
4273 /* if interface is down do nothing */
4274 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4275 return;
4277 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4278 for (i = 0; i < vsi->num_q_vectors; i++)
4279 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4280 } else {
4281 i40e_intr(pf->pdev->irq, netdev);
4282 }
4283 }
4286 #define I40E_QTX_ENA_WAIT_COUNT 50
4289 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4290 * @pf: the PF being configured
4291 * @pf_q: the PF queue
4292 * @enable: enable or disable state of the queue
4294 * This routine will wait for the given Tx queue of the PF to reach the
4295 * enabled or disabled state.
4296 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4297 * multiple retries; else will return 0 in case of success.
4299 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4300 {
4301 int i;
4302 u32 tx_reg;
4304 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4305 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4306 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4307 break;
4309 usleep_range(10, 20);
4310 }
4311 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4312 return -ETIMEDOUT;
4314 return 0;
4315 }
4318 * i40e_control_tx_q - Start or stop a particular Tx queue
4319 * @pf: the PF structure
4320 * @pf_q: the PF queue to configure
4321 * @enable: start or stop the queue
4323 * This function enables or disables a single queue. Note that any delay
4324 * required after the operation is expected to be handled by the caller of
4327 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4329 struct i40e_hw *hw = &pf->hw;
4330 int i;
4331 u32 tx_reg;
4333 /* warn the TX unit of coming changes */
4334 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4336 usleep_range(10, 20);
4338 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4339 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4340 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4341 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4343 usleep_range(1000, 2000);
4346 /* Skip if the queue is already in the requested state */
4347 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4348 return;
4350 /* turn on/off the queue */
4351 if (enable) {
4352 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4353 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4354 } else {
4355 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4356 }
4358 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4359 }
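/* Handshake sketch (illustrative): QENA_REQ is the software request bit
 * and QENA_STAT is what the hardware reports. The first loop above
 * waits for REQ == STAT (e.g. REQ=1/STAT=0 means a previous enable is
 * still in flight); only then is REQ rewritten, and
 * i40e_pf_txq_wait() later polls STAT for the new state.
 */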
4362 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4364 * @pf: the PF structure
4365 * @pf_q: the PF queue to configure
4366 * @is_xdp: true if the queue is used for XDP
4367 * @enable: start or stop the queue
4369 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4370 bool is_xdp, bool enable)
4371 {
4372 int ret;
4374 i40e_control_tx_q(pf, pf_q, enable);
4376 /* wait for the change to finish */
4377 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4378 if (ret) {
4379 dev_info(&pf->pdev->dev,
4380 "VSI seid %d %sTx ring %d %sable timeout\n",
4381 seid, (is_xdp ? "XDP " : ""), pf_q,
4382 (enable ? "en" : "dis"));
4383 }
4385 return ret;
4386 }
4389 * i40e_vsi_control_tx - Start or stop a VSI's rings
4390 * @vsi: the VSI being configured
4391 * @enable: start or stop the rings
4393 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4395 struct i40e_pf *pf = vsi->back;
4396 int i, pf_q, ret = 0;
4398 pf_q = vsi->base_queue;
4399 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4400 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4401 pf_q,
4402 false /*is xdp*/, enable);
4403 if (ret)
4404 break;
4406 if (!i40e_enabled_xdp_vsi(vsi))
4407 continue;
4409 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4410 pf_q + vsi->alloc_queue_pairs,
4411 true /*is xdp*/, enable);
4412 if (ret)
4413 break;
4414 }
4416 return ret;
4417 }
4419 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4420 * @pf: the PF being configured
4421 * @pf_q: the PF queue
4422 * @enable: enable or disable state of the queue
4424 * This routine will wait for the given Rx queue of the PF to reach the
4425 * enabled or disabled state.
4426 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4427 * multiple retries; else will return 0 in case of success.
4429 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4430 {
4431 int i;
4432 u32 rx_reg;
4434 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4435 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4436 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4437 break;
4439 usleep_range(10, 20);
4440 }
4441 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4442 return -ETIMEDOUT;
4444 return 0;
4445 }
4448 * i40e_control_rx_q - Start or stop a particular Rx queue
4449 * @pf: the PF structure
4450 * @pf_q: the PF queue to configure
4451 * @enable: start or stop the queue
4453 * This function enables or disables a single queue. Note that
4454 * any delay required after the operation is expected to be
4455 * handled by the caller of this function.
4457 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4459 struct i40e_hw *hw = &pf->hw;
4460 int i;
4461 u32 rx_reg;
4463 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4464 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4465 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4466 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4468 usleep_range(1000, 2000);
4471 /* Skip if the queue is already in the requested state */
4472 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4473 return;
4475 /* turn on/off the queue */
4476 if (enable)
4477 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4478 else
4479 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4481 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4485 * i40e_control_wait_rx_q
4486 * @pf: the PF structure
4487 * @pf_q: queue being configured
4488 * @enable: start or stop the rings
4490 * This function enables or disables a single queue along with waiting
4491 * for the change to finish. The caller of this function should handle
4492 * the delays needed in the case of disabling queues.
4494 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4495 {
4496 int ret = 0;
4498 i40e_control_rx_q(pf, pf_q, enable);
4500 /* wait for the change to finish */
4501 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4502 if (ret)
4503 return ret;
4505 return ret;
4506 }
4509 * i40e_vsi_control_rx - Start or stop a VSI's rings
4510 * @vsi: the VSI being configured
4511 * @enable: start or stop the rings
4513 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4515 struct i40e_pf *pf = vsi->back;
4516 int i, pf_q, ret = 0;
4518 pf_q = vsi->base_queue;
4519 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4520 ret = i40e_control_wait_rx_q(pf, pf_q, enable);
4521 if (ret) {
4522 dev_info(&pf->pdev->dev,
4523 "VSI seid %d Rx ring %d %sable timeout\n",
4524 vsi->seid, pf_q, (enable ? "en" : "dis"));
4525 break;
4526 }
4527 }
4529 /* Due to HW errata, on Rx disable only, the register can indicate done
4530 * before it really is. Needs 50ms to be sure
4531 */
4532 if (!enable)
4533 mdelay(50);
4535 return ret;
4536 }
4539 * i40e_vsi_start_rings - Start a VSI's rings
4540 * @vsi: the VSI being configured
4542 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4543 {
4544 int ret = 0;
4546 /* do rx first for enable and last for disable */
4547 ret = i40e_vsi_control_rx(vsi, true);
4548 if (ret)
4549 return ret;
4550 ret = i40e_vsi_control_tx(vsi, true);
4552 return ret;
4553 }
4556 * i40e_vsi_stop_rings - Stop a VSI's rings
4557 * @vsi: the VSI being configured
4559 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4561 /* When port TX is suspended, don't wait */
4562 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4563 return i40e_vsi_stop_rings_no_wait(vsi);
4565 /* Rx is enabled first and disabled last; ignore the return values,
4566 * we need to shut down whatever we can.
4567 */
4568 i40e_vsi_control_tx(vsi, false);
4569 i40e_vsi_control_rx(vsi, false);
4573 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4574 * @vsi: the VSI being shutdown
4576 * This function stops all the rings for a VSI but does not delay to verify
4577 * that rings have been disabled. It is expected that the caller is shutting
4578 * down multiple VSIs at once and will delay together for all the VSIs after
4579 * initiating the shutdown. This is particularly useful for shutting down lots
4580 * of VFs together. Otherwise, a large delay can be incurred while configuring
4581 * each VSI in serial.
4583 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4585 struct i40e_pf *pf = vsi->back;
4586 int i, pf_q;
4588 pf_q = vsi->base_queue;
4589 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4590 i40e_control_tx_q(pf, pf_q, false);
4591 i40e_control_rx_q(pf, pf_q, false);
4596 * i40e_vsi_free_irq - Free the irq association with the OS
4597 * @vsi: the VSI being configured
4599 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4601 struct i40e_pf *pf = vsi->back;
4602 struct i40e_hw *hw = &pf->hw;
4603 int base = vsi->base_vector;
4604 u32 val, qp;
4605 int i;
4607 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4608 if (!vsi->q_vectors)
4609 return;
4611 if (!vsi->irqs_ready)
4612 return;
4614 vsi->irqs_ready = false;
4615 for (i = 0; i < vsi->num_q_vectors; i++) {
4616 int irq_num;
4617 u16 vector;
4619 vector = i + base;
4620 irq_num = pf->msix_entries[vector].vector;
4622 /* free only the irqs that were actually requested */
4623 if (!vsi->q_vectors[i] ||
4624 !vsi->q_vectors[i]->num_ringpairs)
4625 continue;
4627 /* clear the affinity notifier in the IRQ descriptor */
4628 irq_set_affinity_notifier(irq_num, NULL);
4629 /* remove our suggested affinity mask for this IRQ */
4630 irq_set_affinity_hint(irq_num, NULL);
4631 synchronize_irq(irq_num);
4632 free_irq(irq_num, vsi->q_vectors[i]);
4634 /* Tear down the interrupt queue link list
4636 * We know that they come in pairs and always
4637 * the Rx first, then the Tx. To clear the
4638 * link list, stick the EOL value into the
4639 * next_q field of the registers.
4640 */
4641 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4642 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4643 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4644 val |= I40E_QUEUE_END_OF_LIST
4645 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4646 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4648 while (qp != I40E_QUEUE_END_OF_LIST) {
4649 u32 next;
4651 val = rd32(hw, I40E_QINT_RQCTL(qp));
4653 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4654 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4655 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4656 I40E_QINT_RQCTL_INTEVENT_MASK);
4658 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4659 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4661 wr32(hw, I40E_QINT_RQCTL(qp), val);
4663 val = rd32(hw, I40E_QINT_TQCTL(qp));
4665 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4666 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4668 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4669 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4670 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4671 I40E_QINT_TQCTL_INTEVENT_MASK);
4673 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4674 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4676 wr32(hw, I40E_QINT_TQCTL(qp), val);
4677 qp = next;
4678 }
4679 }
4680 } else {
4681 free_irq(pf->pdev->irq, pf);
4683 val = rd32(hw, I40E_PFINT_LNKLST0);
4684 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4685 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4686 val |= I40E_QUEUE_END_OF_LIST
4687 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4688 wr32(hw, I40E_PFINT_LNKLST0, val);
4690 val = rd32(hw, I40E_QINT_RQCTL(qp));
4691 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4692 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4693 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4694 I40E_QINT_RQCTL_INTEVENT_MASK);
4696 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4697 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4699 wr32(hw, I40E_QINT_RQCTL(qp), val);
4701 val = rd32(hw, I40E_QINT_TQCTL(qp));
4703 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4704 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4705 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4706 I40E_QINT_TQCTL_INTEVENT_MASK);
4708 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4709 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4711 wr32(hw, I40E_QINT_TQCTL(qp), val);
4716 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4717 * @vsi: the VSI being configured
4718 * @v_idx: Index of vector to be freed
4720 * This function frees the memory allocated to the q_vector. In addition if
4721 * NAPI is enabled it will delete any references to the NAPI struct prior
4722 * to freeing the q_vector.
4724 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4726 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4727 struct i40e_ring *ring;
4729 if (!q_vector)
4730 return;
4732 /* disassociate q_vector from rings */
4733 i40e_for_each_ring(ring, q_vector->tx)
4734 ring->q_vector = NULL;
4736 i40e_for_each_ring(ring, q_vector->rx)
4737 ring->q_vector = NULL;
4739 /* only VSI w/ an associated netdev is set up w/ NAPI */
4740 if (vsi->netdev)
4741 netif_napi_del(&q_vector->napi);
4743 vsi->q_vectors[v_idx] = NULL;
4745 kfree_rcu(q_vector, rcu);
4749 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4750 * @vsi: the VSI being un-configured
4752 * This frees the memory allocated to the q_vectors and
4753 * deletes references to the NAPI struct.
4755 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4759 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4760 i40e_free_q_vector(vsi, v_idx);
4764 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4765 * @pf: board private structure
4767 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4769 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4770 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4771 pci_disable_msix(pf->pdev);
4772 kfree(pf->msix_entries);
4773 pf->msix_entries = NULL;
4774 kfree(pf->irq_pile);
4775 pf->irq_pile = NULL;
4776 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4777 pci_disable_msi(pf->pdev);
4779 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4783 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4784 * @pf: board private structure
4786 * We go through and clear interrupt specific resources and reset the structure
4787 * to pre-load conditions
4789 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4790 {
4791 int i;
4793 i40e_free_misc_vector(pf);
4795 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4796 I40E_IWARP_IRQ_PILE_ID);
4798 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4799 for (i = 0; i < pf->num_alloc_vsi; i++)
4800 if (pf->vsi[i])
4801 i40e_vsi_free_q_vectors(pf->vsi[i]);
4802 i40e_reset_interrupt_capability(pf);
4806 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4807 * @vsi: the VSI being configured
4809 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4811 int q_idx;
4813 if (!vsi->netdev)
4814 return;
4816 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4817 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4819 if (q_vector->rx.ring || q_vector->tx.ring)
4820 napi_enable(&q_vector->napi);
4825 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4826 * @vsi: the VSI being configured
4828 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4830 int q_idx;
4832 if (!vsi->netdev)
4833 return;
4835 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4836 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4838 if (q_vector->rx.ring || q_vector->tx.ring)
4839 napi_disable(&q_vector->napi);
4844 * i40e_vsi_close - Shut down a VSI
4845 * @vsi: the vsi to be quelled
4847 static void i40e_vsi_close(struct i40e_vsi *vsi)
4849 struct i40e_pf *pf = vsi->back;
4850 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4851 i40e_down(vsi);
4852 i40e_vsi_free_irq(vsi);
4853 i40e_vsi_free_tx_resources(vsi);
4854 i40e_vsi_free_rx_resources(vsi);
4855 vsi->current_netdev_flags = 0;
4856 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
4857 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4858 set_bit(__I40E_CLIENT_RESET, pf->state);
4862 * i40e_quiesce_vsi - Pause a given VSI
4863 * @vsi: the VSI being paused
4865 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4867 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4868 return;
4870 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
4871 if (vsi->netdev && netif_running(vsi->netdev))
4872 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4873 else
4874 i40e_vsi_close(vsi);
4878 * i40e_unquiesce_vsi - Resume a given VSI
4879 * @vsi: the VSI being resumed
4881 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4883 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4886 if (vsi->netdev && netif_running(vsi->netdev))
4887 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4888 else
4889 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4893 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4894 * @pf: the PF
4896 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4900 for (v = 0; v < pf->num_alloc_vsi; v++) {
4901 if (pf->vsi[v])
4902 i40e_quiesce_vsi(pf->vsi[v]);
4907 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4908 * @pf: the PF
4910 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4914 for (v = 0; v < pf->num_alloc_vsi; v++) {
4915 if (pf->vsi[v])
4916 i40e_unquiesce_vsi(pf->vsi[v]);
4921 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4922 * @vsi: the VSI being configured
4924 * Wait until all queues on a given VSI have been disabled.
4926 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4928 struct i40e_pf *pf = vsi->back;
4929 int i, pf_q, ret;
4931 pf_q = vsi->base_queue;
4932 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4933 /* Check and wait for the Tx queue */
4934 ret = i40e_pf_txq_wait(pf, pf_q, false);
4936 dev_info(&pf->pdev->dev,
4937 "VSI seid %d Tx ring %d disable timeout\n",
4938 vsi->seid, pf_q);
4939 return ret;
4940 }
4942 if (!i40e_enabled_xdp_vsi(vsi))
4943 goto wait_rx;
4945 /* Check and wait for the XDP Tx queue */
4946 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
4947 false);
4948 if (ret) {
4949 dev_info(&pf->pdev->dev,
4950 "VSI seid %d XDP Tx ring %d disable timeout\n",
4951 vsi->seid, pf_q);
4952 return ret;
4953 }
4954 wait_rx:
4955 /* Check and wait for the Rx queue */
4956 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4957 if (ret) {
4958 dev_info(&pf->pdev->dev,
4959 "VSI seid %d Rx ring %d disable timeout\n",
4960 vsi->seid, pf_q);
4961 return ret;
4962 }
4963 }
4965 return 0;
4966 }
4968 #ifdef CONFIG_I40E_DCB
4970 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4973 * This function waits for the queues to be in disabled state for all the
4974 * VSIs that are managed by this PF.
4976 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4978 int v, ret = 0;
4980 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4981 if (pf->vsi[v]) {
4982 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4983 if (ret)
4984 break;
4985 }
4986 }
4988 return ret;
4989 }
4994 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4995 * @pf: pointer to PF
4997 * Get TC map for ISCSI PF type that will include iSCSI TC
5000 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
5002 struct i40e_dcb_app_priority_table app;
5003 struct i40e_hw *hw = &pf->hw;
5004 u8 enabled_tc = 1; /* TC0 is always enabled */
5005 u8 tc, i;
5006 /* Get the iSCSI APP TLV */
5007 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5009 for (i = 0; i < dcbcfg->numapps; i++) {
5010 app = dcbcfg->app[i];
5011 if (app.selector == I40E_APP_SEL_TCPIP &&
5012 app.protocolid == I40E_APP_PROTOID_ISCSI) {
5013 tc = dcbcfg->etscfg.prioritytable[app.priority];
5014 enabled_tc |= BIT(tc);
5015 break;
5016 }
5017 }
5019 return enabled_tc;
5020 }
5023 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
5024 * @dcbcfg: the corresponding DCBx configuration structure
5026 * Return the number of TCs from given DCBx configuration
5028 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5030 int i, tc_unused = 0;
5031 u8 num_tc = 0;
5032 u8 ret = 0;
5034 /* Scan the ETS Config Priority Table to find
5035 * traffic class enabled for a given priority
5036 * and create a bitmask of enabled TCs
5037 */
5038 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5039 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5041 /* Now scan the bitmask to check for
5042 * contiguous TCs starting with TC0
5043 */
5044 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5045 if (num_tc & BIT(i)) {
5046 if (!tc_unused) {
5047 ret++;
5048 } else {
5049 pr_err("Non-contiguous TC - Disabling DCB\n");
5050 return 1;
5051 }
5052 } else {
5053 tc_unused = 1;
5054 }
5055 }
5057 /* There is always at least TC0 */
5058 if (!ret)
5059 ret = 1;
5061 return ret;
5062 }
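/* Example (illustrative): prioritytable = {0, 0, 1, 1, 2, 0, 0, 0}
 * yields num_tc = BIT(0) | BIT(1) | BIT(2) = 0x7, i.e. three contiguous
 * TCs and ret = 3. A table producing 0x5 (TC0 and TC2 but no TC1) takes
 * the pr_err() path and falls back to a single TC.
 */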
5065 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5066 * @dcbcfg: the corresponding DCBx configuration structure
5068 * Query the current DCB configuration and return the number of
5069 * traffic classes enabled from the given DCBX config
5071 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5073 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5074 u8 enabled_tc = 1;
5075 u8 i;
5077 for (i = 0; i < num_tc; i++)
5078 enabled_tc |= BIT(i);
5080 return enabled_tc;
5081 }
5084 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5085 * @pf: PF being queried
5087 * Query the current MQPRIO configuration and return the number of
5088 * traffic classes enabled.
5090 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5092 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5093 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5094 u8 enabled_tc = 1, i;
5096 for (i = 1; i < num_tc; i++)
5097 enabled_tc |= BIT(i);
5099 return enabled_tc;
5100 }
5102 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5103 * @pf: PF being queried
5105 * Return number of traffic classes enabled for the given PF
5107 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5109 struct i40e_hw *hw = &pf->hw;
5110 u8 i, enabled_tc = 1;
5111 u8 num_tc = 0;
5112 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5114 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5115 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5117 /* If neither MQPRIO nor DCB is enabled, then always use single TC */
5118 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5119 return 1;
5121 /* SFP mode will be enabled for all TCs on port */
5122 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5123 return i40e_dcb_get_num_tc(dcbcfg);
5125 /* MFP mode return count of enabled TCs for this PF */
5126 if (pf->hw.func_caps.iscsi)
5127 enabled_tc = i40e_get_iscsi_tc_map(pf);
5128 else
5129 return 1; /* Only TC0 */
5131 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5132 if (enabled_tc & BIT(i))
5133 num_tc++;
5134 }
5136 return num_tc;
5137 }
5139 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
5140 * @pf: PF being queried
5142 * Return a bitmap for enabled traffic classes for this PF.
5144 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5146 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5147 return i40e_mqprio_get_enabled_tc(pf);
5149 /* If neither MQPRIO nor DCB is enabled for this PF then just return
5150 * default (all valid TC) bitmap.
5151 */
5152 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5153 return I40E_DEFAULT_TRAFFIC_CLASS;
5155 /* SFP mode we want PF to be enabled for all TCs */
5156 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5157 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5159 /* MFP enabled and iSCSI PF type */
5160 if (pf->hw.func_caps.iscsi)
5161 return i40e_get_iscsi_tc_map(pf);
5163 return I40E_DEFAULT_TRAFFIC_CLASS;
5167 * i40e_vsi_get_bw_info - Query VSI BW Information
5168 * @vsi: the VSI being queried
5170 * Returns 0 on success, negative value on failure
5172 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5174 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5175 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5176 struct i40e_pf *pf = vsi->back;
5177 struct i40e_hw *hw = &pf->hw;
5178 i40e_status ret;
5179 u32 tc_bw_max;
5180 int i;
5182 /* Get the VSI level BW configuration */
5183 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5184 if (ret) {
5185 dev_info(&pf->pdev->dev,
5186 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5187 i40e_stat_str(&pf->hw, ret),
5188 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5189 return -EINVAL;
5190 }
5192 /* Get the VSI level BW configuration per TC */
5193 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5194 NULL);
5195 if (ret) {
5196 dev_info(&pf->pdev->dev,
5197 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5198 i40e_stat_str(&pf->hw, ret),
5199 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5200 return -EINVAL;
5201 }
5203 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5204 dev_info(&pf->pdev->dev,
5205 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5206 bw_config.tc_valid_bits,
5207 bw_ets_config.tc_valid_bits);
5208 /* Still continuing */
5211 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5212 vsi->bw_max_quanta = bw_config.max_bw;
5213 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5214 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5215 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5216 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5217 vsi->bw_ets_limit_credits[i] =
5218 le16_to_cpu(bw_ets_config.credits[i]);
5219 /* 3 bits out of 4 for each TC */
5220 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5221 }
5223 return 0;
5224 }
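/* Example with assumed register contents: tc_bw_max packs one 4-bit
 * field per TC, of which the low 3 bits are used. For tc_bw_max =
 * 0x0325: TC0 = (0x0325 >> 0) & 0x7 = 5, TC1 = (0x0325 >> 4) & 0x7 = 2,
 * TC2 = (0x0325 >> 8) & 0x7 = 3.
 */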
5227 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5228 * @vsi: the VSI being configured
5229 * @enabled_tc: TC bitmap
5230 * @bw_share: BW shared credits per TC
5232 * Returns 0 on success, negative value on failure
5234 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5237 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5238 struct i40e_pf *pf = vsi->back;
5242 /* There is no need to reset BW when mqprio mode is on. */
5243 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5244 return 0;
5245 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5246 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5247 if (ret)
5248 dev_info(&pf->pdev->dev,
5249 "Failed to reset tx rate for vsi->seid %u\n",
5250 vsi->seid);
5251 return ret;
5252 }
5253 bw_data.tc_valid_bits = enabled_tc;
5254 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5255 bw_data.tc_bw_credits[i] = bw_share[i];
5257 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5258 if (ret) {
5259 dev_info(&pf->pdev->dev,
5260 "AQ command Config VSI BW allocation per TC failed = %d\n",
5261 pf->hw.aq.asq_last_status);
5262 return -EINVAL;
5263 }
5265 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5266 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5268 return 0;
5269 }
5272 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5273 * @vsi: the VSI being configured
5274 * @enabled_tc: TC map to be enabled
5277 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5279 struct net_device *netdev = vsi->netdev;
5280 struct i40e_pf *pf = vsi->back;
5281 struct i40e_hw *hw = &pf->hw;
5282 u8 netdev_tc = 0;
5283 int i;
5284 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5286 if (!netdev)
5287 return;
5289 if (!enabled_tc) {
5290 netdev_reset_tc(netdev);
5291 return;
5292 }
5294 /* Set up actual enabled TCs on the VSI */
5295 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5296 return;
5298 /* set per TC queues for the VSI */
5299 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5300 /* Only set TC queues for enabled tcs
5302 * e.g. For a VSI that has TC0 and TC3 enabled the
5303 * enabled_tc bitmap would be 0x00001001; the driver
5304 * will set the numtc for netdev as 2 that will be
5305 * referenced by the netdev layer as TC 0 and 1.
5306 */
5307 if (vsi->tc_config.enabled_tc & BIT(i))
5308 netdev_set_tc_queue(netdev,
5309 vsi->tc_config.tc_info[i].netdev_tc,
5310 vsi->tc_config.tc_info[i].qcount,
5311 vsi->tc_config.tc_info[i].qoffset);
5314 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5315 return;
5317 /* Assign UP2TC map for the VSI */
5318 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5319 /* Get the actual TC# for the UP */
5320 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5321 /* Get the mapped netdev TC# for the UP */
5322 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5323 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5328 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
5329 * @vsi: the VSI being configured
5330 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5332 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5333 struct i40e_vsi_context *ctxt)
5335 /* copy just the sections touched not the entire info
5336 * since not all sections are valid as returned by
5337 * FW.
5338 */
5339 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5340 memcpy(&vsi->info.queue_mapping,
5341 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5342 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5343 sizeof(vsi->info.tc_mapping));
5347 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5348 * @vsi: VSI to be configured
5349 * @enabled_tc: TC bitmap
5351 * This configures a particular VSI for TCs that are mapped to the
5352 * given TC bitmap. It uses default bandwidth share for TCs across
5353 * VSIs to configure TC for a particular VSI.
5356 * It is expected that the VSI queues have been quiesced before calling
5357 * this function.
5359 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5361 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5362 struct i40e_pf *pf = vsi->back;
5363 struct i40e_hw *hw = &pf->hw;
5364 struct i40e_vsi_context ctxt;
5365 int ret = 0;
5366 int i;
5368 /* Check if enabled_tc is same as existing or new TCs */
5369 if (vsi->tc_config.enabled_tc == enabled_tc &&
5370 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5371 return ret;
5373 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5374 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5375 if (enabled_tc & BIT(i))
5376 bw_share[i] = 1;
5377 }
5379 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5380 if (ret) {
5381 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5383 dev_info(&pf->pdev->dev,
5384 "Failed configuring TC map %d for VSI %d\n",
5385 enabled_tc, vsi->seid);
5386 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5387 &bw_config, NULL);
5388 if (ret) {
5389 dev_info(&pf->pdev->dev,
5390 "Failed querying vsi bw info, err %s aq_err %s\n",
5391 i40e_stat_str(hw, ret),
5392 i40e_aq_str(hw, hw->aq.asq_last_status));
5393 return ret;
5394 }
5395 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5396 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5398 if (!valid_tc)
5399 valid_tc = bw_config.tc_valid_bits;
5400 /* Always enable TC0, no matter what */
5401 valid_tc |= BIT(0);
5402 dev_info(&pf->pdev->dev,
5403 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5404 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5405 enabled_tc = valid_tc;
5408 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5409 if (ret) {
5410 dev_err(&pf->pdev->dev,
5411 "Unable to configure TC map %d for VSI %d\n",
5412 enabled_tc, vsi->seid);
5413 return ret;
5414 }
5415 }
5417 /* Update Queue Pairs Mapping for currently enabled UPs */
5418 ctxt.seid = vsi->seid;
5419 ctxt.pf_num = vsi->back->hw.pf_id;
5420 ctxt.vf_num = 0;
5421 ctxt.uplink_seid = vsi->uplink_seid;
5422 ctxt.info = vsi->info;
5423 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5424 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5425 if (ret)
5426 goto out;
5427 } else {
5428 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5429 }
5431 /* On destroying the qdisc, reset vsi->rss_size, as number of enabled
5432 * queues changed.
5433 */
5434 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5435 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5436 vsi->num_queue_pairs);
5437 ret = i40e_vsi_config_rss(vsi);
5438 if (ret) {
5439 dev_info(&vsi->back->pdev->dev,
5440 "Failed to reconfig rss for num_queues\n");
5441 return ret;
5442 }
5443 vsi->reconfig_rss = false;
5445 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5446 ctxt.info.valid_sections |=
5447 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5448 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5451 /* Update the VSI after updating the VSI queue-mapping
5452 * information.
5453 */
5454 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5455 if (ret) {
5456 dev_info(&pf->pdev->dev,
5457 "Update vsi tc config failed, err %s aq_err %s\n",
5458 i40e_stat_str(hw, ret),
5459 i40e_aq_str(hw, hw->aq.asq_last_status));
5460 goto out;
5461 }
5462 /* update the local VSI info with updated queue map */
5463 i40e_vsi_update_queue_map(vsi, &ctxt);
5464 vsi->info.valid_sections = 0;
5466 /* Update current VSI BW information */
5467 ret = i40e_vsi_get_bw_info(vsi);
5468 if (ret) {
5469 dev_info(&pf->pdev->dev,
5470 "Failed updating vsi bw info, err %s aq_err %s\n",
5471 i40e_stat_str(hw, ret),
5472 i40e_aq_str(hw, hw->aq.asq_last_status));
5473 goto out;
5474 }
5476 /* Update the netdev TC setup */
5477 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5478 out:
5479 return ret;
5480 }
5483 * i40e_get_link_speed - Returns link speed for the interface
5484 * @vsi: VSI to be configured
5487 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5489 struct i40e_pf *pf = vsi->back;
5491 switch (pf->hw.phy.link_info.link_speed) {
5492 case I40E_LINK_SPEED_40GB:
5493 return 40000;
5494 case I40E_LINK_SPEED_25GB:
5495 return 25000;
5496 case I40E_LINK_SPEED_20GB:
5497 return 20000;
5498 case I40E_LINK_SPEED_10GB:
5499 return 10000;
5500 case I40E_LINK_SPEED_1GB:
5501 return 1000;
5502 default:
5503 return -EINVAL;
5504 }
5505 }
5508 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5509 * @vsi: VSI to be configured
5510 * @seid: seid of the channel/VSI
5511 * @max_tx_rate: max TX rate to be configured as BW limit
5513 * Helper function to set BW limit for a given VSI
5515 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5517 struct i40e_pf *pf = vsi->back;
5518 u64 credits = 0;
5519 int speed = 0;
5520 int ret = 0;
5522 speed = i40e_get_link_speed(vsi);
5523 if (max_tx_rate > speed) {
5524 dev_err(&pf->pdev->dev,
5525 "Invalid max tx rate %llu specified for VSI seid %d.",
5526 max_tx_rate, seid);
5527 return -EINVAL;
5528 }
5529 if (max_tx_rate && max_tx_rate < 50) {
5530 dev_warn(&pf->pdev->dev,
5531 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5532 max_tx_rate = 50;
5533 }
5535 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
5536 credits = max_tx_rate;
5537 do_div(credits, I40E_BW_CREDIT_DIVISOR);
5538 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5539 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5540 if (ret)
5541 dev_err(&pf->pdev->dev,
5542 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5543 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5544 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5545 return ret;
5546 }
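/* Worked example (illustrative): max_tx_rate = 5000 Mbps becomes
 * credits = 5000 / I40E_BW_CREDIT_DIVISOR (50) = 100 scheduler credits,
 * while a request of 30 Mbps is first raised to the 50 Mbps floor above
 * and ends up as a single credit.
 */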
5549 * i40e_remove_queue_channels - Remove queue channels for the TCs
5550 * @vsi: VSI to be configured
5552 * Remove queue channels for the TCs
5554 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5556 enum i40e_admin_queue_err last_aq_status;
5557 struct i40e_cloud_filter *cfilter;
5558 struct i40e_channel *ch, *ch_tmp;
5559 struct i40e_pf *pf = vsi->back;
5560 struct hlist_node *node;
5561 int ret, i;
5563 /* Reset rss size that was stored when reconfiguring rss for
5564 * channel VSIs with non-power-of-2 queue count.
5565 */
5566 vsi->current_rss_size = 0;
5568 /* perform cleanup for channels if they exist */
5569 if (list_empty(&vsi->ch_list))
5570 return;
5572 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5573 struct i40e_vsi *p_vsi;
5575 list_del(&ch->list);
5576 p_vsi = ch->parent_vsi;
5577 if (!p_vsi || !ch->initialized) {
5578 kfree(ch);
5579 continue;
5580 }
5581 /* Reset queue contexts */
5582 for (i = 0; i < ch->num_queue_pairs; i++) {
5583 struct i40e_ring *tx_ring, *rx_ring;
5586 pf_q = ch->base_queue + i;
5587 tx_ring = vsi->tx_rings[pf_q];
5588 tx_ring->ch = NULL;
5590 rx_ring = vsi->rx_rings[pf_q];
5591 rx_ring->ch = NULL;
5592 }
5594 /* Reset BW configured for this VSI via mqprio */
5595 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5596 if (ret)
5597 dev_info(&vsi->back->pdev->dev,
5598 "Failed to reset tx rate for ch->seid %u\n",
5599 ch->seid);
5601 /* delete cloud filters associated with this channel */
5602 hlist_for_each_entry_safe(cfilter, node,
5603 &pf->cloud_filter_list, cloud_node) {
5604 if (cfilter->seid != ch->seid)
5605 continue;
5607 hash_del(&cfilter->cloud_node);
5608 if (cfilter->dst_port)
5609 ret = i40e_add_del_cloud_filter_big_buf(vsi,
5610 cfilter,
5611 false);
5612 else
5613 ret = i40e_add_del_cloud_filter(vsi, cfilter,
5614 false);
5615 last_aq_status = pf->hw.aq.asq_last_status;
5616 if (ret)
5617 dev_info(&pf->pdev->dev,
5618 "Failed to delete cloud filter, err %s aq_err %s\n",
5619 i40e_stat_str(&pf->hw, ret),
5620 i40e_aq_str(&pf->hw, last_aq_status));
5621 kfree(cfilter);
5622 }
5624 /* delete VSI from FW */
5625 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5626 NULL);
5627 if (ret)
5628 dev_err(&vsi->back->pdev->dev,
5629 "unable to remove channel (%d) for parent VSI(%d)\n",
5630 ch->seid, p_vsi->seid);
5631 kfree(ch);
5632 }
5633 INIT_LIST_HEAD(&vsi->ch_list);
5637 * i40e_is_any_channel - channel exist or not
5638 * @vsi: ptr to VSI to which channels are associated with
5640 * Returns true or false if channel(s) exist for associated VSI or not
5642 static bool i40e_is_any_channel(struct i40e_vsi *vsi)
5644 struct i40e_channel *ch, *ch_tmp;
5646 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5647 if (ch->initialized)
5648 return true;
5649 }
5651 return false;
5652 }
5655 * i40e_get_max_queues_for_channel
5656 * @vsi: ptr to VSI to which channels are associated with
5658 * Helper function which returns max value among the queue counts set on the
5659 * channels/TCs created.
5661 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5663 struct i40e_channel *ch, *ch_tmp;
5664 int max = 0;
5666 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5667 if (!ch->initialized)
5668 continue;
5669 if (ch->num_queue_pairs > max)
5670 max = ch->num_queue_pairs;
5671 }
5673 return max;
5674 }
5677 * i40e_validate_num_queues - validate num_queues w.r.t channel
5678 * @pf: ptr to PF device
5679 * @num_queues: number of queues
5680 * @vsi: the parent VSI
5681 * @reconfig_rss: indicates should the RSS be reconfigured or not
5683 * This function validates number of queues in the context of new channel
5684 * which is being established and determines if RSS should be reconfigured
5685 * or not for parent VSI.
5687 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5688 struct i40e_vsi *vsi, bool *reconfig_rss)
5690 int max_ch_queues;
5692 if (!reconfig_rss)
5693 return -EINVAL;
5695 *reconfig_rss = false;
5696 if (vsi->current_rss_size) {
5697 if (num_queues > vsi->current_rss_size) {
5698 dev_dbg(&pf->pdev->dev,
5699 "Error: num_queues (%d) > vsi's current_size(%d)\n",
5700 num_queues, vsi->current_rss_size);
5701 return -EINVAL;
5702 } else if ((num_queues < vsi->current_rss_size) &&
5703 (!is_power_of_2(num_queues))) {
5704 dev_dbg(&pf->pdev->dev,
5705 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5706 num_queues, vsi->current_rss_size);
5707 return -EINVAL;
5708 }
5709 }
5711 if (!is_power_of_2(num_queues)) {
5712 /* Find the max num_queues configured for existing channels, if any,
5713 * and if channels exist, enforce that 'num_queues' exceeds the max
5714 * queue count ever configured for a channel.
5715 */
5717 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5718 if (num_queues < max_ch_queues) {
5719 dev_dbg(&pf->pdev->dev,
5720 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
5721 num_queues, max_ch_queues);
5722 return -EINVAL;
5723 }
5724 *reconfig_rss = true;
5725 }
5727 return 0;
5728 }
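/* Example (illustrative): with vsi->current_rss_size = 16,
 * num_queues = 8 passes (smaller and a power of 2), num_queues = 12
 * fails (smaller but not a power of 2), and num_queues = 24 fails
 * outright because it exceeds current_rss_size.
 */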
5731 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
5732 * @vsi: the VSI being setup
5733 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
5735 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
5737 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5739 struct i40e_pf *pf = vsi->back;
5740 u8 seed[I40E_HKEY_ARRAY_SIZE];
5741 struct i40e_hw *hw = &pf->hw;
5742 int local_rss_size;
5743 u8 *lut;
5744 int ret;
5746 if (!vsi->rss_size)
5747 return -EINVAL;
5749 if (rss_size > vsi->rss_size)
5750 return -EINVAL;
5752 local_rss_size = min_t(int, vsi->rss_size, rss_size);
5753 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5754 if (!lut)
5755 return -ENOMEM;
5757 /* Ignoring user configured lut if there is one */
5758 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5760 /* Use user configured hash key if there is one, otherwise
5761 * use default.
5762 */
5763 if (vsi->rss_hkey_user)
5764 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5766 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5768 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5769 if (ret) {
5770 dev_info(&pf->pdev->dev,
5771 "Cannot set RSS lut, err %s aq_err %s\n",
5772 i40e_stat_str(hw, ret),
5773 i40e_aq_str(hw, hw->aq.asq_last_status));
5774 kfree(lut);
5775 return ret;
5776 }
5777 kfree(lut);
5779 /* Do the update w.r.t. storing rss_size */
5780 if (!vsi->orig_rss_size)
5781 vsi->orig_rss_size = vsi->rss_size;
5782 vsi->current_rss_size = local_rss_size;
5784 return ret;
5785 }
5788 * i40e_channel_setup_queue_map - Setup a channel queue map
5789 * @pf: ptr to PF device
5790 * @vsi: the VSI being setup
5791 * @ctxt: VSI context structure
5792 * @ch: ptr to channel structure
5794 * Setup queue map for a specific channel
5796 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5797 struct i40e_vsi_context *ctxt,
5798 struct i40e_channel *ch)
5800 u16 qcount, qmap, sections = 0;
5801 u8 offset = 0;
5802 int pow;
5804 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5805 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5807 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5808 ch->num_queue_pairs = qcount;
5810 /* find the next higher power-of-2 of num queue pairs */
5811 pow = ilog2(qcount);
5812 if (!is_power_of_2(qcount))
5813 pow++;
5815 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5816 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
5818 /* Setup queue TC[0].qmap for given VSI context */
5819 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5821 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
5822 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5823 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
5824 ctxt->info.valid_sections |= cpu_to_le16(sections);
5825 }
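/* Encoding example (illustrative): ch->num_queue_pairs = 6 is not a
 * power of 2, so pow = ilog2(6) + 1 = 3 and TC0's qmap advertises a
 * 2^3 = 8 queue region at offset 0; only the first 6 queues are used,
 * laid out contiguously per I40E_AQ_VSI_QUE_MAP_CONTIG.
 */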
5828 * i40e_add_channel - add a channel by adding VSI
5829 * @pf: ptr to PF device
5830 * @uplink_seid: underlying HW switching element (VEB) ID
5831 * @ch: ptr to channel structure
5833 * Add a channel (VSI) using add_vsi and queue_map
5835 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
5836 struct i40e_channel *ch)
5838 struct i40e_hw *hw = &pf->hw;
5839 struct i40e_vsi_context ctxt;
5840 u8 enabled_tc = 0x1; /* TC0 enabled */
5843 if (ch->type != I40E_VSI_VMDQ2) {
5844 dev_info(&pf->pdev->dev,
5845 "add new vsi failed, ch->type %d\n", ch->type);
5846 return -EINVAL;
5847 }
5849 memset(&ctxt, 0, sizeof(ctxt));
5850 ctxt.pf_num = hw->pf_id;
5851 ctxt.vf_num = 0;
5852 ctxt.uplink_seid = uplink_seid;
5853 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5854 if (ch->type == I40E_VSI_VMDQ2)
5855 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5857 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
5858 ctxt.info.valid_sections |=
5859 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5860 ctxt.info.switch_id =
5861 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5864 /* Set queue map for a given VSI context */
5865 i40e_channel_setup_queue_map(pf, &ctxt, ch);
5867 /* Now time to create VSI */
5868 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5870 dev_info(&pf->pdev->dev,
5871 "add new vsi failed, err %s aq_err %s\n",
5872 i40e_stat_str(&pf->hw, ret),
5873 i40e_aq_str(&pf->hw,
5874 pf->hw.aq.asq_last_status));
5875 return -ENOENT;
5876 }
5878 /* Success, update channel, set enabled_tc only if the channel
5879 * is not a macvlan.
5880 */
5881 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
5882 ch->seid = ctxt.seid;
5883 ch->vsi_number = ctxt.vsi_number;
5884 ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
5886 /* copy just the sections touched not the entire info
5887 * since not all sections are valid as returned by
5888 * FW.
5889 */
5890 ch->info.mapping_flags = ctxt.info.mapping_flags;
5891 memcpy(&ch->info.queue_mapping,
5892 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
5893 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
5894 sizeof(ctxt.info.tc_mapping));
5896 return 0;
5897 }
5899 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
5902 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5903 i40e_status ret;
5904 int i;
5906 bw_data.tc_valid_bits = ch->enabled_tc;
5907 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5908 bw_data.tc_bw_credits[i] = bw_share[i];
5910 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
5911 &bw_data, NULL);
5912 if (ret) {
5913 dev_info(&vsi->back->pdev->dev,
5914 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
5915 vsi->back->hw.aq.asq_last_status, ch->seid);
5916 return -EINVAL;
5917 }
5919 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5920 ch->info.qs_handle[i] = bw_data.qs_handles[i];
5922 return 0;
5923 }
5926 * i40e_channel_config_tx_ring - config TX ring associated with new channel
5927 * @pf: ptr to PF device
5928 * @vsi: the VSI being setup
5929 * @ch: ptr to channel structure
5931 * Configure TX rings associated with channel (VSI) since queues are being
5934 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
5935 struct i40e_vsi *vsi,
5936 struct i40e_channel *ch)
5937 {
5938 i40e_status ret;
5939 int i;
5940 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5942 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5943 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5944 if (ch->enabled_tc & BIT(i))
5945 bw_share[i] = 1;
5946 }
5948 /* configure BW for new VSI */
5949 ret = i40e_channel_config_bw(vsi, ch, bw_share);
5951 dev_info(&vsi->back->pdev->dev,
5952 "Failed configuring TC map %d for channel (seid %u)\n",
5953 ch->enabled_tc, ch->seid);
5954 return ret;
5955 }
5957 for (i = 0; i < ch->num_queue_pairs; i++) {
5958 struct i40e_ring *tx_ring, *rx_ring;
5961 pf_q = ch->base_queue + i;
5963 /* Get to TX ring ptr of main VSI, for re-setup TX queue
5964 * context.
5965 */
5966 tx_ring = vsi->tx_rings[pf_q];
5967 tx_ring->ch = ch;
5969 /* Get the RX ring ptr */
5970 rx_ring = vsi->rx_rings[pf_q];
5971 rx_ring->ch = ch;
5972 }
5974 return 0;
5975 }
5978 * i40e_setup_hw_channel - setup new channel
5979 * @pf: ptr to PF device
5980 * @vsi: the VSI being setup
5981 * @ch: ptr to channel structure
5982 * @uplink_seid: underlying HW switching element (VEB) ID
5983 * @type: type of channel to be created (VMDq2/VF)
5985 * Setup new channel (VSI) based on specified type (VMDq2/VF)
5986 * and configures TX rings accordingly
5988 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
5989 struct i40e_vsi *vsi,
5990 struct i40e_channel *ch,
5991 u16 uplink_seid, u8 type)
5995 ch->initialized = false;
5996 ch->base_queue = vsi->next_base_queue;
5997 ch->type = type;
5999 /* Proceed with creation of channel (VMDq2) VSI */
6000 ret = i40e_add_channel(pf, uplink_seid, ch);
6002 dev_info(&pf->pdev->dev,
6003 "failed to add_channel using uplink_seid %u\n",
6004 uplink_seid);
6005 return ret;
6006 }
6008 /* Mark the successful creation of channel */
6009 ch->initialized = true;
6011 /* Reconfigure TX queues using QTX_CTL register */
6012 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6014 dev_info(&pf->pdev->dev,
6015 "failed to configure TX rings for channel %u\n",
6016 ch->seid);
6017 return ret;
6018 }
6020 /* update 'next_base_queue' */
6021 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6022 dev_dbg(&pf->pdev->dev,
6023 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6024 ch->seid, ch->vsi_number, ch->stat_counter_idx,
6025 ch->num_queue_pairs,
6026 vsi->next_base_queue);
6028 return 0;
6029 }
6031 * i40e_setup_channel - setup new channel using uplink element
6032 * @pf: ptr to PF device
6033 * @vsi: the VSI being configured
6035 * @ch: ptr to channel structure
6037 * Setup new channel (VSI) based on specified type (VMDq2/VF)
6038 * and uplink switching element (uplink_seid)
6040 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6041 struct i40e_channel *ch)
6042 {
6043 u8 vsi_type;
6044 u16 seid;
6045 int ret;
6047 if (vsi->type == I40E_VSI_MAIN) {
6048 vsi_type = I40E_VSI_VMDQ2;
6049 } else {
6050 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6051 vsi->type);
6052 return false;
6053 }
6055 /* underlying switching element */
6056 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6058 /* create channel (VSI), configure TX rings */
6059 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6060 if (ret)
6061 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6065 return ch->initialized ? true : false;
6069 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6070 * @vsi: ptr to VSI which has PF backing
6072 * Sets up switch mode correctly if it needs to be changed and perform
6073 * what are allowed modes.
6075 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6076 {
6077 u8 mode;
6078 struct i40e_pf *pf = vsi->back;
6079 struct i40e_hw *hw = &pf->hw;
6080 int ret;
6082 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6083 if (ret)
6084 return -EINVAL;
6086 if (hw->dev_caps.switch_mode) {
6087 /* if switch mode is set, support mode2 (non-tunneled for
6088 * cloud filter) for now
6089 */
6090 u32 switch_mode = hw->dev_caps.switch_mode &
6091 I40E_SWITCH_MODE_MASK;
6092 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6093 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6094 goto proceed;
6095 dev_err(&pf->pdev->dev,
6096 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6097 hw->dev_caps.switch_mode);
6098 return -EINVAL;
6099 }
6100 }
6101 proceed:
6102 /* Set Bit 7 to be valid */
6103 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6105 /* Set L4type for TCP support */
6106 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6108 /* Set cloud filter mode */
6109 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6111 /* Prep mode field for set_switch_config */
6112 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6113 pf->last_sw_conf_valid_flags,
6114 mode, NULL);
6115 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6116 dev_err(&pf->pdev->dev,
6117 "couldn't set switch config bits, err %s aq_err %s\n",
6118 i40e_stat_str(hw, ret),
6120 hw->aq.asq_last_status));
6122 return ret;
6123 }
6126 * i40e_create_queue_channel - function to create channel
6127 * @vsi: VSI to be configured
6128 * @ch: ptr to channel (it contains channel specific params)
6130 * This function creates channel (VSI) using num_queues specified by user,
6131 * reconfigs RSS if needed.
6133 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6134 struct i40e_channel *ch)
6135 {
6136 struct i40e_pf *pf = vsi->back;
6137 bool reconfig_rss;
6138 int err;
6140 if (!ch)
6141 return -EINVAL;
6143 if (!ch->num_queue_pairs) {
6144 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6145 ch->num_queue_pairs);
6146 return -EINVAL;
6147 }
6149 /* validate user requested num_queues for channel */
6150 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6151 &reconfig_rss);
6152 if (err) {
6153 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6154 ch->num_queue_pairs);
6155 return -EINVAL;
6156 }
6158 /* By default we are in VEPA mode, if this is the first VF/VMDq
6159 * VSI to be added switch to VEB mode.
6160 */
6161 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
6162 (!i40e_is_any_channel(vsi))) {
6163 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
6164 dev_dbg(&pf->pdev->dev,
6165 "Failed to create channel. Override queues (%u) not power of 2\n",
6166 vsi->tc_config.tc_info[0].qcount);
6167 return -EINVAL;
6168 }
6170 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6171 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6173 if (vsi->type == I40E_VSI_MAIN) {
6174 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6175 i40e_do_reset(pf, I40E_PF_RESET_FLAG,
6178 i40e_do_reset_safe(pf,
6179 I40E_PF_RESET_FLAG);
6180 }
6181 }
6182 /* now onwards for main VSI, number of queues will be value
6183 * of TC0's queue count.
6184 */
6185 }
6187 /* By this time, vsi->cnt_q_avail shall be set to non-zero and
6188 * it should be more than num_queues
6189 */
6190 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6191 dev_dbg(&pf->pdev->dev,
6192 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6193 vsi->cnt_q_avail, ch->num_queue_pairs);
6194 return -EINVAL;
6195 }
6197 /* reconfig_rss only if vsi type is MAIN_VSI */
6198 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6199 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6201 dev_info(&pf->pdev->dev,
6202 "Error: unable to reconfig rss for num_queues (%u)\n",
6203 ch->num_queue_pairs);
6204 return -EINVAL;
6205 }
6206 }
6208 if (!i40e_setup_channel(pf, vsi, ch)) {
6209 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6210 return -EINVAL;
6211 }
6213 dev_info(&pf->pdev->dev,
6214 "Setup channel (id:%u) utilizing num_queues %d\n",
6215 ch->seid, ch->num_queue_pairs);
6217 /* configure VSI for BW limit */
6218 if (ch->max_tx_rate) {
6219 u64 credits = ch->max_tx_rate;
6221 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6222 return -EINVAL;
6224 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6225 dev_dbg(&pf->pdev->dev,
6226 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6227 ch->max_tx_rate,
6228 credits,
6229 ch->seid);
6230 }
6232 /* in case of VF, this will be main SRIOV VSI */
6233 ch->parent_vsi = vsi;
6235 /* and update main_vsi's count for queue_available to use */
6236 vsi->cnt_q_avail -= ch->num_queue_pairs;
6238 return 0;
6239 }
6242 * i40e_configure_queue_channels - Add queue channel for the given TCs
6243 * @vsi: VSI to be configured
6245 * Configures queue channel mapping to the given TCs
6247 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6249 struct i40e_channel *ch;
6250 u64 max_rate = 0;
6251 int ret = 0, i;
6253 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6254 vsi->tc_seid_map[0] = vsi->seid;
6255 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6256 if (vsi->tc_config.enabled_tc & BIT(i)) {
6257 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6258 if (!ch) {
6259 ret = -ENOMEM;
6260 goto err_free;
6261 }
6263 INIT_LIST_HEAD(&ch->list);
6264 ch->num_queue_pairs =
6265 vsi->tc_config.tc_info[i].qcount;
6266 ch->base_queue =
6267 vsi->tc_config.tc_info[i].qoffset;
6269 /* Bandwidth limit through tc interface is in bytes/s,
6270 * convert to Mbps.
6271 */
6272 max_rate = vsi->mqprio_qopt.max_rate[i];
6273 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6274 ch->max_tx_rate = max_rate;
6276 list_add_tail(&ch->list, &vsi->ch_list);
6278 ret = i40e_create_queue_channel(vsi, ch);
6280 dev_err(&vsi->back->pdev->dev,
6281 "Failed creating queue channel with TC%d: queues %d\n",
6282 i, ch->num_queue_pairs);
6283 goto err_free;
6284 }
6285 vsi->tc_seid_map[i] = ch->seid;
6286 }
6287 }
6288 return ret;
6290 err_free:
6291 i40e_remove_queue_channels(vsi);
6292 return ret;
6293 }
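/* Conversion example (illustrative): mqprio passes max_rate in bytes/s,
 * so a 1 Gbit/s cap arrives as 125000000; do_div() with
 * I40E_BW_MBPS_DIVISOR (125000) turns that into ch->max_tx_rate = 1000
 * Mbps before i40e_create_queue_channel() applies it.
 */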
6296 * i40e_veb_config_tc - Configure TCs for given VEB
6297 * @veb: associated veb structure
6298 * @enabled_tc: TC bitmap
6300 * Configures given TC bitmap for VEB (switching) element
6302 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6304 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6305 struct i40e_pf *pf = veb->pf;
6306 int ret = 0;
6307 int i;
6309 /* No TCs or already enabled TCs just return */
6310 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6311 return ret;
6313 bw_data.tc_valid_bits = enabled_tc;
6314 /* bw_data.absolute_credits is not set (relative) */
6316 /* Enable ETS TCs with equal BW Share for now */
6317 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6318 if (enabled_tc & BIT(i))
6319 bw_data.tc_bw_share_credits[i] = 1;
6322 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6323 &bw_data, NULL);
6324 if (ret) {
6325 dev_info(&pf->pdev->dev,
6326 "VEB bw config failed, err %s aq_err %s\n",
6327 i40e_stat_str(&pf->hw, ret),
6328 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6329 goto out;
6330 }
6332 /* Update the BW information */
6333 ret = i40e_veb_get_bw_info(veb);
6334 if (ret)
6335 dev_info(&pf->pdev->dev,
6336 "Failed getting veb bw config, err %s aq_err %s\n",
6337 i40e_stat_str(&pf->hw, ret),
6338 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6340 out:
6341 return ret;
6342 }
6345 #ifdef CONFIG_I40E_DCB
6347 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6348 * @pf: pointer to the PF struct
6350 * Reconfigure VEB/VSIs on a given PF; it is assumed that
6351 * the caller would've quiesced all the VSIs before calling
6352 * this function.
6354 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6355 {
6356 u8 tc_map = 0;
6357 int ret;
6358 u8 v;
6360 /* Enable the TCs available on PF to all VEBs */
6361 tc_map = i40e_pf_get_tc_map(pf);
6362 for (v = 0; v < I40E_MAX_VEB; v++) {
6363 if (!pf->veb[v])
6364 continue;
6365 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6367 dev_info(&pf->pdev->dev,
6368 "Failed configuring TC for VEB seid=%d\n",
6370 /* Will try to configure as many components as possible */
6374 /* Update each VSI */
6375 for (v = 0; v < pf->num_alloc_vsi; v++) {
6379 /* - Enable all TCs for the LAN VSI
6380 * - For all others keep them at TC0 for now
6382 if (v == pf->lan_vsi)
6383 tc_map = i40e_pf_get_tc_map(pf);
6385 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6387 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6389 dev_info(&pf->pdev->dev,
6390 "Failed configuring TC for VSI seid=%d\n",
6392 /* Will try to configure as many components as possible */
6394 /* Re-configure VSI vectors based on updated TC map */
6395 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6396 if (pf->vsi[v]->netdev)
6397 i40e_dcbnl_set_all(pf->vsi[v]);
6403 * i40e_resume_port_tx - Resume port Tx
6406 * Resume a port's Tx and issue a PF reset in case of failure to
6409 static int i40e_resume_port_tx(struct i40e_pf *pf)
6411 struct i40e_hw *hw = &pf->hw;
6414 ret = i40e_aq_resume_port_tx(hw, NULL);
6416 dev_info(&pf->pdev->dev,
6417 "Resume Port Tx failed, err %s aq_err %s\n",
6418 i40e_stat_str(&pf->hw, ret),
6419 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6420 /* Schedule PF reset to recover */
6421 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6422 i40e_service_event_schedule(pf);
6429 * i40e_init_pf_dcb - Initialize DCB configuration
6430 * @pf: PF being configured
6432 * Query the current DCB configuration and cache it
6433 * in the hardware structure
6435 static int i40e_init_pf_dcb(struct i40e_pf *pf)
6437 struct i40e_hw *hw = &pf->hw;
6440 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable.
6441 * Also do not enable DCBX if the FW LLDP agent is disabled
6443 if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
6444 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) {
6445 dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n");
6446 err = I40E_NOT_SUPPORTED;
6450 err = i40e_init_dcb(hw, true);
6452 /* Device/Function is not DCBX capable */
6453 if ((!hw->func_caps.dcb) ||
6454 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6455 dev_info(&pf->pdev->dev,
6456 "DCBX offload is not supported or is disabled for this PF.\n");
6458 /* When status is not DISABLED then DCBX is managed in FW */
6459 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6460 DCB_CAP_DCBX_VER_IEEE;
6462 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6463 /* Enable DCB tagging only when more than one TC is in use;
6464 * explicitly disable it when only one TC is configured
6466 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6467 pf->flags |= I40E_FLAG_DCB_ENABLED;
6469 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6470 dev_dbg(&pf->pdev->dev,
6471 "DCBX offload is supported for this PF.\n");
6473 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6474 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6475 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6477 dev_info(&pf->pdev->dev,
6478 "Query for DCB configuration failed, err %s aq_err %s\n",
6479 i40e_stat_str(&pf->hw, err),
6480 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6486 #endif /* CONFIG_I40E_DCB */
6487 #define SPEED_SIZE 14
6490 * i40e_print_link_message - print link up or down
6491 * @vsi: the VSI for which link needs a message
6492 * @isup: true if link is up, false otherwise
6494 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6496 enum i40e_aq_link_speed new_speed;
6497 struct i40e_pf *pf = vsi->back;
6498 char *speed = "Unknown";
6499 char *fc = "Unknown";
6505 new_speed = pf->hw.phy.link_info.link_speed;
6507 new_speed = I40E_LINK_SPEED_UNKNOWN;
6509 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
6511 vsi->current_isup = isup;
6512 vsi->current_speed = new_speed;
6514 netdev_info(vsi->netdev, "NIC Link is Down\n");
6518 /* Warn user if link speed on NPAR enabled partition is not at least 10Gbps
6521 if (pf->hw.func_caps.npar_enable &&
6522 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
6523 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
6524 netdev_warn(vsi->netdev,
6525 "The partition detected link speed that is less than 10Gbps\n");
6527 switch (pf->hw.phy.link_info.link_speed) {
6528 case I40E_LINK_SPEED_40GB:
6531 case I40E_LINK_SPEED_20GB:
6534 case I40E_LINK_SPEED_25GB:
6537 case I40E_LINK_SPEED_10GB:
6540 case I40E_LINK_SPEED_5GB:
6543 case I40E_LINK_SPEED_2_5GB:
6546 case I40E_LINK_SPEED_1GB:
6549 case I40E_LINK_SPEED_100MB:
6556 switch (pf->hw.fc.current_mode) {
6560 case I40E_FC_TX_PAUSE:
6563 case I40E_FC_RX_PAUSE:
6571 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
6576 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6579 if (pf->hw.phy.link_info.fec_info &
6580 I40E_AQ_CONFIG_FEC_KR_ENA)
6581 fec = "CL74 FC-FEC/BASE-R";
6582 else if (pf->hw.phy.link_info.fec_info &
6583 I40E_AQ_CONFIG_FEC_RS_ENA)
6584 fec = "CL108 RS-FEC";
6586 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
6587 * both RS and FC are requested
6589 if (vsi->back->hw.phy.link_info.req_fec_info &
6590 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
6591 if (vsi->back->hw.phy.link_info.req_fec_info &
6592 I40E_AQ_REQUEST_FEC_RS)
6593 req_fec = "CL108 RS-FEC";
6595 req_fec = "CL74 FC-FEC/BASE-R";
6597 netdev_info(vsi->netdev,
6598 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
6599 speed, req_fec, fec, an, fc);
6601 netdev_info(vsi->netdev,
6602 "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
6609 * i40e_up_complete - Finish the last steps of bringing up a connection
6610 * @vsi: the VSI being configured
6612 static int i40e_up_complete(struct i40e_vsi *vsi)
6614 struct i40e_pf *pf = vsi->back;
6617 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6618 i40e_vsi_configure_msix(vsi);
6620 i40e_configure_msi_and_legacy(vsi);
6623 err = i40e_vsi_start_rings(vsi);
6627 clear_bit(__I40E_VSI_DOWN, vsi->state);
6628 i40e_napi_enable_all(vsi);
6629 i40e_vsi_enable_irq(vsi);
6631 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
6633 i40e_print_link_message(vsi, true);
6634 netif_tx_start_all_queues(vsi->netdev);
6635 netif_carrier_on(vsi->netdev);
6638 /* replay FDIR SB filters */
6639 if (vsi->type == I40E_VSI_FDIR) {
6640 /* reset fd counters */
6643 i40e_fdir_filter_restore(vsi);
6646 /* On the next run of the service_task, notify any clients of the newly opened netdev
6649 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6650 i40e_service_event_schedule(pf);
6656 * i40e_vsi_reinit_locked - Reset the VSI
6657 * @vsi: the VSI being configured
6659 * Rebuild the ring structs after some configuration
6660 * has changed, e.g. MTU size.
6662 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
6664 struct i40e_pf *pf = vsi->back;
6666 WARN_ON(in_interrupt());
6667 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
6668 usleep_range(1000, 2000);
6672 clear_bit(__I40E_CONFIG_BUSY, pf->state);
6676 * i40e_up - Bring the connection back up after being down
6677 * @vsi: the VSI being configured
6679 int i40e_up(struct i40e_vsi *vsi)
6683 err = i40e_vsi_configure(vsi);
6685 err = i40e_up_complete(vsi);
6691 * i40e_force_link_state - Force the link status
6692 * @pf: board private structure
6693 * @is_up: whether the link state should be forced up or down
6695 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
6697 struct i40e_aq_get_phy_abilities_resp abilities;
6698 struct i40e_aq_set_phy_config config = {0};
6699 struct i40e_hw *hw = &pf->hw;
6704 /* Card might've been put in an unstable state by other drivers
6705 * and applications, which causes incorrect speed values to be
6706 * set on startup. In order to clear speed registers, we call
6707 * get_phy_capabilities twice, once to get initial state of
6708 * available speeds, and once to get current PHY config.
6710 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
6713 dev_err(&pf->pdev->dev,
6714 "failed to get phy cap., ret = %s last_status = %s\n",
6715 i40e_stat_str(hw, err),
6716 i40e_aq_str(hw, hw->aq.asq_last_status));
6719 speed = abilities.link_speed;
6721 /* Get the current phy config */
6722 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
6725 dev_err(&pf->pdev->dev,
6726 "failed to get phy cap., ret = %s last_status = %s\n",
6727 i40e_stat_str(hw, err),
6728 i40e_aq_str(hw, hw->aq.asq_last_status));
6732 /* If link needs to go up, but was not forced to go down,
6733 * and its speed values are OK, no need for a flap
6735 if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
6736 return I40E_SUCCESS;
6738 /* To force link we need to set bits for all supported PHY types,
6739 * but there are now more than 32, so we need to split the bitmap
6740 * across two fields.
6742 mask = I40E_PHY_TYPES_BITMASK;
6743 config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
6744 config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
6745 /* Copy the old settings, except of phy_type */
6746 config.abilities = abilities.abilities;
6747 if (abilities.link_speed != 0)
6748 config.link_speed = abilities.link_speed;
6750 config.link_speed = speed;
6751 config.eee_capability = abilities.eee_capability;
6752 config.eeer = abilities.eeer_val;
6753 config.low_power_ctrl = abilities.d3_lpan;
6754 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
6755 I40E_AQ_PHY_FEC_CONFIG_MASK;
6756 err = i40e_aq_set_phy_config(hw, &config, NULL);
6759 dev_err(&pf->pdev->dev,
6760 "set phy config ret = %s last_status = %s\n",
6761 i40e_stat_str(&pf->hw, err),
6762 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6766 /* Update the link info */
6767 err = i40e_update_link_info(hw);
6769 /* Wait a little bit (on 40G cards it sometimes takes a really
6770 * long time for link to come back from the atomic reset)
6774 i40e_update_link_info(hw);
6777 i40e_aq_set_link_restart_an(hw, true, NULL);
6779 return I40E_SUCCESS;
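/* Illustrative example of the PHY-type bitmap split above: for a
 * hypothetical mask of 0x3FFFFFFFF, config.phy_type carries the low
 * 32 bits (0xFFFFFFFF) and config.phy_type_ext carries bits 32..39
 * (0x03), which together cover the more-than-32 supported PHY types.
 */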
6783 * i40e_down - Shutdown the connection processing
6784 * @vsi: the VSI being stopped
6786 void i40e_down(struct i40e_vsi *vsi)
6790 /* It is assumed that the caller of this function
6791 * sets the vsi->state __I40E_VSI_DOWN bit.
6794 netif_carrier_off(vsi->netdev);
6795 netif_tx_disable(vsi->netdev);
6797 i40e_vsi_disable_irq(vsi);
6798 i40e_vsi_stop_rings(vsi);
6799 if (vsi->type == I40E_VSI_MAIN &&
6800 vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)
6801 i40e_force_link_state(vsi->back, false);
6802 i40e_napi_disable_all(vsi);
6804 for (i = 0; i < vsi->num_queue_pairs; i++) {
6805 i40e_clean_tx_ring(vsi->tx_rings[i]);
6806 if (i40e_enabled_xdp_vsi(vsi)) {
6807 /* Make sure that in-progress ndo_xdp_xmit and
6808 * ndo_xsk_wakeup calls are completed.
6811 i40e_clean_tx_ring(vsi->xdp_rings[i]);
6813 i40e_clean_rx_ring(vsi->rx_rings[i]);
6819 * i40e_validate_mqprio_qopt - validate queue mapping info
6820 * @vsi: the VSI being configured
6821 * @mqprio_qopt: queue parameters
6823 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
6824 struct tc_mqprio_qopt_offload *mqprio_qopt)
6826 u64 sum_max_rate = 0;
6830 if (mqprio_qopt->qopt.offset[0] != 0 ||
6831 mqprio_qopt->qopt.num_tc < 1 ||
6832 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
6834 for (i = 0; ; i++) {
6835 if (!mqprio_qopt->qopt.count[i])
6837 if (mqprio_qopt->min_rate[i]) {
6838 dev_err(&vsi->back->pdev->dev,
6839 "Invalid min tx rate (greater than 0) specified\n");
6842 max_rate = mqprio_qopt->max_rate[i];
6843 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6844 sum_max_rate += max_rate;
6846 if (i >= mqprio_qopt->qopt.num_tc - 1)
6848 if (mqprio_qopt->qopt.offset[i + 1] !=
6849 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
6852 if (vsi->num_queue_pairs <
6853 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
6856 if (sum_max_rate > i40e_get_link_speed(vsi)) {
6857 dev_err(&vsi->back->pdev->dev,
6858 "Invalid max tx rate specified\n");
6865 * i40e_vsi_set_default_tc_config - set default values for tc configuration
6866 * @vsi: the VSI being configured
6868 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
6873 /* Only TC0 is enabled */
6874 vsi->tc_config.numtc = 1;
6875 vsi->tc_config.enabled_tc = 1;
6876 qcount = min_t(int, vsi->alloc_queue_pairs,
6877 i40e_pf_get_max_q_per_tc(vsi->back));
6878 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6879 /* For the TC that is not enabled set the offset to the default
6880 * queue and allocate one queue for the given TC.
6882 vsi->tc_config.tc_info[i].qoffset = 0;
6884 vsi->tc_config.tc_info[i].qcount = qcount;
6886 vsi->tc_config.tc_info[i].qcount = 1;
6887 vsi->tc_config.tc_info[i].netdev_tc = 0;
6892 * i40e_del_macvlan_filter
6893 * @hw: pointer to the HW structure
6894 * @seid: seid of the channel VSI
6895 * @macaddr: the mac address to apply as a filter
6896 * @aq_err: store the admin Q error
6898 * This function deletes a mac filter on the channel VSI which serves as the
6899 * macvlan. Returns 0 on success.
6901 static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
6902 const u8 *macaddr, int *aq_err)
6904 struct i40e_aqc_remove_macvlan_element_data element;
6907 memset(&element, 0, sizeof(element));
6908 ether_addr_copy(element.mac_addr, macaddr);
6909 element.vlan_tag = 0;
6910 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6911 status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
6912 *aq_err = hw->aq.asq_last_status;
6918 * i40e_add_macvlan_filter
6919 * @hw: pointer to the HW structure
6920 * @seid: seid of the channel VSI
6921 * @macaddr: the mac address to apply as a filter
6922 * @aq_err: store the admin Q error
6924 * This function adds a mac filter on the channel VSI which serves as the
6925 * macvlan. Returns 0 on success.
6927 static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
6928 const u8 *macaddr, int *aq_err)
6930 struct i40e_aqc_add_macvlan_element_data element;
6934 ether_addr_copy(element.mac_addr, macaddr);
6935 element.vlan_tag = 0;
6936 element.queue_number = 0;
6937 element.match_method = I40E_AQC_MM_ERR_NO_RES;
6938 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6939 element.flags = cpu_to_le16(cmd_flags);
6940 status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
6941 *aq_err = hw->aq.asq_last_status;
6947 * i40e_reset_ch_rings - Reset the queue contexts in a channel
6948 * @vsi: the VSI we want to access
6949 * @ch: the channel we want to access
6951 static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
6953 struct i40e_ring *tx_ring, *rx_ring;
6957 for (i = 0; i < ch->num_queue_pairs; i++) {
6958 pf_q = ch->base_queue + i;
6959 tx_ring = vsi->tx_rings[pf_q];
6961 rx_ring = vsi->rx_rings[pf_q];
6967 * i40e_free_macvlan_channels
6968 * @vsi: the VSI we want to access
6970 * This function frees the Qs of the channel VSI from
6971 * the stack and also deletes the channel VSIs which
6972 * serve as macvlans.
6974 static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
6976 struct i40e_channel *ch, *ch_tmp;
6979 if (list_empty(&vsi->macvlan_list))
6982 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
6983 struct i40e_vsi *parent_vsi;
6985 if (i40e_is_channel_macvlan(ch)) {
6986 i40e_reset_ch_rings(vsi, ch);
6987 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
6988 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
6989 netdev_set_sb_channel(ch->fwd->netdev, 0);
6994 list_del(&ch->list);
6995 parent_vsi = ch->parent_vsi;
6996 if (!parent_vsi || !ch->initialized) {
7001 /* remove the VSI */
7002 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7005 dev_err(&vsi->back->pdev->dev,
7006 "unable to remove channel (%d) for parent VSI(%d)\n",
7007 ch->seid, parent_vsi->seid);
7010 vsi->macvlan_cnt = 0;
7014 * i40e_fwd_ring_up - bring the macvlan device up
7015 * @vsi: the VSI we want to access
7016 * @vdev: macvlan netdevice
7017 * @fwd: the private fwd structure
7019 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7020 struct i40e_fwd_adapter *fwd)
7022 int ret = 0, num_tc = 1, i, aq_err;
7023 struct i40e_channel *ch, *ch_tmp;
7024 struct i40e_pf *pf = vsi->back;
7025 struct i40e_hw *hw = &pf->hw;
7027 if (list_empty(&vsi->macvlan_list))
7030 /* Go through the list and find an available channel */
7031 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7032 if (!i40e_is_channel_macvlan(ch)) {
7034 /* record configuration for macvlan interface in vdev */
7035 for (i = 0; i < num_tc; i++)
7036 netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7038 ch->num_queue_pairs,
7040 for (i = 0; i < ch->num_queue_pairs; i++) {
7041 struct i40e_ring *tx_ring, *rx_ring;
7044 pf_q = ch->base_queue + i;
7046 /* Get to TX ring ptr */
7047 tx_ring = vsi->tx_rings[pf_q];
7050 /* Get the RX ring ptr */
7051 rx_ring = vsi->rx_rings[pf_q];
7058 /* Guarantee all rings are updated before we update the
7059 * MAC address filter.
7063 /* Add a mac filter */
7064 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7066 /* if we cannot add the MAC rule then disable the offload */
7067 macvlan_release_l2fw_offload(vdev);
7068 for (i = 0; i < ch->num_queue_pairs; i++) {
7069 struct i40e_ring *rx_ring;
7072 pf_q = ch->base_queue + i;
7073 rx_ring = vsi->rx_rings[pf_q];
7074 rx_ring->netdev = NULL;
7076 dev_info(&pf->pdev->dev,
7077 "Error adding mac filter on macvlan err %s, aq_err %s\n",
7078 i40e_stat_str(hw, ret),
7079 i40e_aq_str(hw, aq_err));
7080 netdev_err(vdev, "L2fwd offload disabled due to L2 filter error\n");
7087 * i40e_setup_macvlans - create the channels which will be macvlans
7088 * @vsi: the VSI we want to access
7089 * @macvlan_cnt: no. of macvlans to be set up
7090 * @qcnt: no. of Qs per macvlan
7091 * @vdev: macvlan netdevice
7093 static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7094 struct net_device *vdev)
7096 struct i40e_pf *pf = vsi->back;
7097 struct i40e_hw *hw = &pf->hw;
7098 struct i40e_vsi_context ctxt;
7099 u16 sections, qmap, num_qps;
7100 struct i40e_channel *ch;
7101 int i, pow, ret = 0;
7104 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7107 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
7109 /* find the next higher power-of-2 of num queue pairs */
7110 pow = fls(roundup_pow_of_two(num_qps) - 1);
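/* e.g. (illustrative) num_qps = 12: roundup_pow_of_two(12) = 16,
 * fls(15) = 4, so pow = 4 and the qmap advertises 2^4 = 16 queues.
 */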
7112 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7113 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
7115 /* Setup context bits for the main VSI */
7116 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7117 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7118 memset(&ctxt, 0, sizeof(ctxt));
7119 ctxt.seid = vsi->seid;
7120 ctxt.pf_num = vsi->back->hw.pf_id;
7122 ctxt.uplink_seid = vsi->uplink_seid;
7123 ctxt.info = vsi->info;
7124 ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7125 ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7126 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7127 ctxt.info.valid_sections |= cpu_to_le16(sections);
7129 /* Reconfigure RSS for main VSI with new max queue count */
7130 vsi->rss_size = max_t(u16, num_qps, qcnt);
7131 ret = i40e_vsi_config_rss(vsi);
7133 dev_info(&pf->pdev->dev,
7134 "Failed to reconfig RSS for num_queues (%u)\n",
7138 vsi->reconfig_rss = true;
7139 dev_dbg(&vsi->back->pdev->dev,
7140 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7141 vsi->next_base_queue = num_qps;
7142 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
7144 /* Update the VSI after updating the VSI queue-mapping information
7147 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7149 dev_info(&pf->pdev->dev,
7150 "Update vsi tc config failed, err %s aq_err %s\n",
7151 i40e_stat_str(hw, ret),
7152 i40e_aq_str(hw, hw->aq.asq_last_status));
7155 /* update the local VSI info with updated queue map */
7156 i40e_vsi_update_queue_map(vsi, &ctxt);
7157 vsi->info.valid_sections = 0;
7159 /* Create channels for macvlans */
7160 INIT_LIST_HEAD(&vsi->macvlan_list);
7161 for (i = 0; i < macvlan_cnt; i++) {
7162 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7167 INIT_LIST_HEAD(&ch->list);
7168 ch->num_queue_pairs = qcnt;
7169 if (!i40e_setup_channel(pf, vsi, ch)) {
7174 ch->parent_vsi = vsi;
7175 vsi->cnt_q_avail -= ch->num_queue_pairs;
7177 list_add_tail(&ch->list, &vsi->macvlan_list);
7183 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7184 i40e_free_macvlan_channels(vsi);
7190 * i40e_fwd_add - configure macvlans
7191 * @netdev: net device to configure
7192 * @vdev: macvlan netdevice
7194 static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7196 struct i40e_netdev_priv *np = netdev_priv(netdev);
7197 u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7198 struct i40e_vsi *vsi = np->vsi;
7199 struct i40e_pf *pf = vsi->back;
7200 struct i40e_fwd_adapter *fwd;
7201 int avail_macvlan, ret;
7203 if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
7204 netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7205 return ERR_PTR(-EINVAL);
7207 if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
7208 netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7209 return ERR_PTR(-EINVAL);
7211 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7212 netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7213 return ERR_PTR(-EINVAL);
7216 /* The macvlan device has to be a single Q device so that the
7217 * tc_to_txq field can be reused to pick the tx queue.
7219 if (netif_is_multiqueue(vdev))
7220 return ERR_PTR(-ERANGE);
7222 if (!vsi->macvlan_cnt) {
7223 /* reserve bit 0 for the pf device */
7224 set_bit(0, vsi->fwd_bitmask);
7226 /* Try to reserve as many queues as possible for macvlans. First
7227 * reserve 3/4 of the max vectors, then half, then a quarter, and
7228 * calculate Qs per macvlan as you go
7230 vectors = pf->num_lan_msix;
7231 if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
7232 /* allocate 4 Qs per macvlan and 32 Qs to the PF*/
7234 macvlan_cnt = (vectors - 32) / 4;
7235 } else if (vectors <= 64 && vectors > 32) {
7236 /* allocate 2 Qs per macvlan and 16 Qs to the PF*/
7238 macvlan_cnt = (vectors - 16) / 2;
7239 } else if (vectors <= 32 && vectors > 16) {
7240 /* allocate 1 Q per macvlan and 16 Qs to the PF*/
7242 macvlan_cnt = vectors - 16;
7243 } else if (vectors <= 16 && vectors > 8) {
7244 /* allocate 1 Q per macvlan and 8 Qs to the PF */
7246 macvlan_cnt = vectors - 8;
7248 /* allocate 1 Q per macvlan and 1 Q to the PF */
7250 macvlan_cnt = vectors - 1;
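/* Illustrative arithmetic for the tiers above: with pf->num_lan_msix =
 * 48 (the 32 < vectors <= 64 tier), the PF keeps 16 queues and each
 * macvlan gets 2, so macvlan_cnt = (48 - 16) / 2 = 16.
 */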
7253 if (macvlan_cnt == 0)
7254 return ERR_PTR(-EBUSY);
7256 /* Quiesce VSI queues */
7257 i40e_quiesce_vsi(vsi);
7259 /* sets up the macvlans but does not "enable" them */
7260 ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
7263 return ERR_PTR(ret);
7266 i40e_unquiesce_vsi(vsi);
7268 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
7270 if (avail_macvlan >= I40E_MAX_MACVLANS)
7271 return ERR_PTR(-EBUSY);
7273 /* create the fwd struct */
7274 fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
7276 return ERR_PTR(-ENOMEM);
7278 set_bit(avail_macvlan, vsi->fwd_bitmask);
7279 fwd->bit_no = avail_macvlan;
7280 netdev_set_sb_channel(vdev, avail_macvlan);
7283 if (!netif_running(netdev))
7286 /* Set fwd ring up */
7287 ret = i40e_fwd_ring_up(vsi, vdev, fwd);
7289 /* unbind the queues and drop the subordinate channel config */
7290 netdev_unbind_sb_channel(netdev, vdev);
7291 netdev_set_sb_channel(vdev, 0);
7294 return ERR_PTR(-EINVAL);
7301 * i40e_del_all_macvlans - Delete all the mac filters on the channels
7302 * @vsi: the VSI we want to access
7304 static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
7306 struct i40e_channel *ch, *ch_tmp;
7307 struct i40e_pf *pf = vsi->back;
7308 struct i40e_hw *hw = &pf->hw;
7309 int aq_err, ret = 0;
7311 if (list_empty(&vsi->macvlan_list))
7314 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7315 if (i40e_is_channel_macvlan(ch)) {
7316 ret = i40e_del_macvlan_filter(hw, ch->seid,
7317 i40e_channel_mac(ch),
7320 /* Reset queue contexts */
7321 i40e_reset_ch_rings(vsi, ch);
7322 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7323 netdev_unbind_sb_channel(vsi->netdev,
7325 netdev_set_sb_channel(ch->fwd->netdev, 0);
7334 * i40e_fwd_del - delete macvlan interfaces
7335 * @netdev: net device to configure
7336 * @vdev: macvlan netdevice
7338 static void i40e_fwd_del(struct net_device *netdev, void *vdev)
7340 struct i40e_netdev_priv *np = netdev_priv(netdev);
7341 struct i40e_fwd_adapter *fwd = vdev;
7342 struct i40e_channel *ch, *ch_tmp;
7343 struct i40e_vsi *vsi = np->vsi;
7344 struct i40e_pf *pf = vsi->back;
7345 struct i40e_hw *hw = &pf->hw;
7346 int aq_err, ret = 0;
7348 /* Find the channel associated with the macvlan and del mac filter */
7349 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7350 if (i40e_is_channel_macvlan(ch) &&
7351 ether_addr_equal(i40e_channel_mac(ch),
7352 fwd->netdev->dev_addr)) {
7353 ret = i40e_del_macvlan_filter(hw, ch->seid,
7354 i40e_channel_mac(ch),
7357 /* Reset queue contexts */
7358 i40e_reset_ch_rings(vsi, ch);
7359 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7360 netdev_unbind_sb_channel(netdev, fwd->netdev);
7361 netdev_set_sb_channel(fwd->netdev, 0);
7365 dev_info(&pf->pdev->dev,
7366 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
7367 i40e_stat_str(hw, ret),
7368 i40e_aq_str(hw, aq_err));
7376 * i40e_setup_tc - configure multiple traffic classes
7377 * @netdev: net device to configure
7378 * @type_data: tc offload data
7380 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
7382 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
7383 struct i40e_netdev_priv *np = netdev_priv(netdev);
7384 struct i40e_vsi *vsi = np->vsi;
7385 struct i40e_pf *pf = vsi->back;
7386 u8 enabled_tc = 0, num_tc, hw;
7387 bool need_reset = false;
7388 int old_queue_pairs;
7393 old_queue_pairs = vsi->num_queue_pairs;
7394 num_tc = mqprio_qopt->qopt.num_tc;
7395 hw = mqprio_qopt->qopt.hw;
7396 mode = mqprio_qopt->mode;
7398 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7399 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
7403 /* Check if MFP enabled */
7404 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
7406 "Configuring TC not supported in MFP mode\n");
7410 case TC_MQPRIO_MODE_DCB:
7411 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7413 /* Check if DCB enabled to continue */
7414 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
7416 "DCB is not enabled for adapter\n");
7420 /* Check whether tc count is within enabled limit */
7421 if (num_tc > i40e_pf_get_num_tc(pf)) {
7423 "TC count greater than enabled on link for adapter\n");
7427 case TC_MQPRIO_MODE_CHANNEL:
7428 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
7430 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
7433 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7435 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
7438 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
7439 sizeof(*mqprio_qopt));
7440 pf->flags |= I40E_FLAG_TC_MQPRIO;
7441 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7448 /* Generate TC map for number of tc requested */
7449 for (i = 0; i < num_tc; i++)
7450 enabled_tc |= BIT(i);
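/* e.g. num_tc = 3 yields enabled_tc = 0x7, i.e. TC0..TC2 (illustrative). */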
7452 /* Requesting same TC configuration as already enabled */
7453 if (enabled_tc == vsi->tc_config.enabled_tc &&
7454 mode != TC_MQPRIO_MODE_CHANNEL)
7457 /* Quiesce VSI queues */
7458 i40e_quiesce_vsi(vsi);
7460 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
7461 i40e_remove_queue_channels(vsi);
7463 /* Configure VSI for enabled TCs */
7464 ret = i40e_vsi_config_tc(vsi, enabled_tc);
7466 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
7471 dev_info(&vsi->back->pdev->dev,
7472 "Setup channel (id:%u) utilizing num_queues %d\n",
7473 vsi->seid, vsi->tc_config.tc_info[0].qcount);
7476 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
7477 if (vsi->mqprio_qopt.max_rate[0]) {
7478 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
7480 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
7481 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
7483 u64 credits = max_tx_rate;
7485 do_div(credits, I40E_BW_CREDIT_DIVISOR);
7486 dev_dbg(&vsi->back->pdev->dev,
7487 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
7496 ret = i40e_configure_queue_channels(vsi);
7498 vsi->num_queue_pairs = old_queue_pairs;
7500 "Failed configuring queue channels\n");
7507 /* Reset the configuration data to defaults, only TC0 is enabled */
7509 i40e_vsi_set_default_tc_config(vsi);
7514 i40e_unquiesce_vsi(vsi);
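/* Minimal usage sketch for the channel-mode path above (assumed
 * iproute2 tc-mqprio syntax; the device name and rates are
 * placeholders):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 \
 *      map 0 0 0 0 1 1 1 1 queues 4@0 4@4 \
 *      hw 1 mode channel shaper bw_rlimit max_rate 1Gbit 2Gbit
 *
 * This exercises TC_MQPRIO_MODE_CHANNEL: the queue layout is validated
 * by i40e_validate_mqprio_qopt() and one queue channel is created per
 * additional TC by i40e_configure_queue_channels().
 */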
7519 * i40e_set_cld_element - sets cloud filter element data
7520 * @filter: cloud filter rule
7521 * @cld: ptr to cloud filter element data
7523 * This is helper function to copy data into cloud filter element
7526 i40e_set_cld_element(struct i40e_cloud_filter *filter,
7527 struct i40e_aqc_cloud_filters_element_data *cld)
7532 memset(cld, 0, sizeof(*cld));
7533 ether_addr_copy(cld->outer_mac, filter->dst_mac);
7534 ether_addr_copy(cld->inner_mac, filter->src_mac);
7536 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
7539 if (filter->n_proto == ETH_P_IPV6) {
7540 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
7541 for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
7543 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
7544 ipa = cpu_to_le32(ipa);
7545 memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
7548 ipa = be32_to_cpu(filter->dst_ipv4);
7549 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
7552 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
7554 /* tenant_id is not supported by FW now; once the support is enabled,
7555 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
7557 if (filter->tenant_id)
7562 * i40e_add_del_cloud_filter - Add/del cloud filter
7563 * @vsi: pointer to VSI
7564 * @filter: cloud filter rule
7565 * @add: if true, add, if false, delete
7567 * Add or delete a cloud filter for a specific flow spec.
7568 * Returns 0 if the filter was successfully added.
7570 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
7571 struct i40e_cloud_filter *filter, bool add)
7573 struct i40e_aqc_cloud_filters_element_data cld_filter;
7574 struct i40e_pf *pf = vsi->back;
7576 static const u16 flag_table[128] = {
7577 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
7578 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
7579 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
7580 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
7581 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
7582 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
7583 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
7584 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
7585 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
7586 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
7587 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
7588 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
7589 [I40E_CLOUD_FILTER_FLAGS_IIP] =
7590 I40E_AQC_ADD_CLOUD_FILTER_IIP,
7593 if (filter->flags >= ARRAY_SIZE(flag_table))
7594 return I40E_ERR_CONFIG;
7596 /* copy element needed to add cloud filter from filter */
7597 i40e_set_cld_element(filter, &cld_filter);
7599 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
7600 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
7601 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
7603 if (filter->n_proto == ETH_P_IPV6)
7604 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7605 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7607 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7608 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7611 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
7614 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
7617 dev_dbg(&pf->pdev->dev,
7618 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
7619 add ? "add" : "delete", filter->dst_port, ret,
7620 pf->hw.aq.asq_last_status);
7622 dev_info(&pf->pdev->dev,
7623 "%s cloud filter for VSI: %d\n",
7624 add ? "Added" : "Deleted", filter->seid);
7629 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
7630 * @vsi: pointer to VSI
7631 * @filter: cloud filter rule
7632 * @add: if true, add, if false, delete
7634 * Add or delete a cloud filter for a specific flow spec using big buffer.
7635 * Returns 0 if the filter was successfully added.
7637 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
7638 struct i40e_cloud_filter *filter,
7641 struct i40e_aqc_cloud_filters_element_bb cld_filter;
7642 struct i40e_pf *pf = vsi->back;
7645 /* Filters with both src and dst MAC addresses set are not supported */
7646 if ((is_valid_ether_addr(filter->dst_mac) &&
7647 is_valid_ether_addr(filter->src_mac)) ||
7648 (is_multicast_ether_addr(filter->dst_mac) &&
7649 is_multicast_ether_addr(filter->src_mac)))
7652 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
7653 * ports are not supported via big buffer now.
7655 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
7658 /* adding filter using src_port/src_ip is not supported at this stage */
7659 if (filter->src_port || filter->src_ipv4 ||
7660 !ipv6_addr_any(&filter->ip.v6.src_ip6))
7663 /* copy element needed to add cloud filter from filter */
7664 i40e_set_cld_element(filter, &cld_filter.element);
7666 if (is_valid_ether_addr(filter->dst_mac) ||
7667 is_valid_ether_addr(filter->src_mac) ||
7668 is_multicast_ether_addr(filter->dst_mac) ||
7669 is_multicast_ether_addr(filter->src_mac)) {
7670 /* MAC + IP: unsupported mode */
7671 if (filter->dst_ipv4)
7674 /* since we validated that L4 port must be valid before
7675 * we get here, start with the respective "flags" value
7676 * and update it if a VLAN is present
7678 cld_filter.element.flags =
7679 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
7681 if (filter->vlan_id) {
7682 cld_filter.element.flags =
7683 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
7686 } else if (filter->dst_ipv4 ||
7687 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
7688 cld_filter.element.flags =
7689 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
7690 if (filter->n_proto == ETH_P_IPV6)
7691 cld_filter.element.flags |=
7692 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7694 cld_filter.element.flags |=
7695 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7697 dev_err(&pf->pdev->dev,
7698 "either mac or ip has to be valid for cloud filter\n");
7702 /* Now copy the L4 port into bytes 6..7 of the general fields */
7703 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
7704 be16_to_cpu(filter->dst_port);
7707 /* Validate current device switch mode, change if necessary */
7708 ret = i40e_validate_and_set_switch_mode(vsi);
7710 dev_err(&pf->pdev->dev,
7711 "failed to set switch mode, ret %d\n",
7716 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
7719 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
7724 dev_dbg(&pf->pdev->dev,
7725 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
7726 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
7728 dev_info(&pf->pdev->dev,
7729 "%s cloud filter for VSI: %d, L4 port: %d\n",
7730 add ? "add" : "delete", filter->seid,
7731 ntohs(filter->dst_port));
7736 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
7737 * @vsi: Pointer to VSI
7738 * @cls_flower: Pointer to struct flow_cls_offload
7739 * @filter: Pointer to cloud filter structure
7742 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7743 struct flow_cls_offload *f,
7744 struct i40e_cloud_filter *filter)
7746 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
7747 struct flow_dissector *dissector = rule->match.dissector;
7748 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
7749 struct i40e_pf *pf = vsi->back;
7752 if (dissector->used_keys &
7753 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7754 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7755 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7756 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7757 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7758 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7759 BIT(FLOW_DISSECTOR_KEY_PORTS) |
7760 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
7761 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
7762 dissector->used_keys);
7766 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
7767 struct flow_match_enc_keyid match;
7769 flow_rule_match_enc_keyid(rule, &match);
7770 if (match.mask->keyid != 0)
7771 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
7773 filter->tenant_id = be32_to_cpu(match.key->keyid);
7776 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
7777 struct flow_match_basic match;
7779 flow_rule_match_basic(rule, &match);
7780 n_proto_key = ntohs(match.key->n_proto);
7781 n_proto_mask = ntohs(match.mask->n_proto);
7783 if (n_proto_key == ETH_P_ALL) {
7787 filter->n_proto = n_proto_key & n_proto_mask;
7788 filter->ip_proto = match.key->ip_proto;
7791 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7792 struct flow_match_eth_addrs match;
7794 flow_rule_match_eth_addrs(rule, &match);
7796 /* use is_broadcast and is_zero to check for all 0xff or 0 */
7797 if (!is_zero_ether_addr(match.mask->dst)) {
7798 if (is_broadcast_ether_addr(match.mask->dst)) {
7799 field_flags |= I40E_CLOUD_FIELD_OMAC;
7801 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
7803 return I40E_ERR_CONFIG;
7807 if (!is_zero_ether_addr(match.mask->src)) {
7808 if (is_broadcast_ether_addr(match.mask->src)) {
7809 field_flags |= I40E_CLOUD_FIELD_IMAC;
7811 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
7813 return I40E_ERR_CONFIG;
7816 ether_addr_copy(filter->dst_mac, match.key->dst);
7817 ether_addr_copy(filter->src_mac, match.key->src);
7820 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
7821 struct flow_match_vlan match;
7823 flow_rule_match_vlan(rule, &match);
7824 if (match.mask->vlan_id) {
7825 if (match.mask->vlan_id == VLAN_VID_MASK) {
7826 field_flags |= I40E_CLOUD_FIELD_IVLAN;
7829 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
7830 match.mask->vlan_id);
7831 return I40E_ERR_CONFIG;
7835 filter->vlan_id = cpu_to_be16(match.key->vlan_id);
7838 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
7839 struct flow_match_control match;
7841 flow_rule_match_control(rule, &match);
7842 addr_type = match.key->addr_type;
7845 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7846 struct flow_match_ipv4_addrs match;
7848 flow_rule_match_ipv4_addrs(rule, &match);
7849 if (match.mask->dst) {
7850 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
7851 field_flags |= I40E_CLOUD_FIELD_IIP;
7853 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
7855 return I40E_ERR_CONFIG;
7859 if (match.mask->src) {
7860 if (match.mask->src == cpu_to_be32(0xffffffff)) {
7861 field_flags |= I40E_CLOUD_FIELD_IIP;
7863 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
7865 return I40E_ERR_CONFIG;
7869 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
7870 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
7871 return I40E_ERR_CONFIG;
7873 filter->dst_ipv4 = match.key->dst;
7874 filter->src_ipv4 = match.key->src;
7877 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7878 struct flow_match_ipv6_addrs match;
7880 flow_rule_match_ipv6_addrs(rule, &match);
7882 /* src and dest IPV6 address should not be LOOPBACK
7883 * (0:0:0:0:0:0:0:1), which can be represented as ::1
7885 if (ipv6_addr_loopback(&match.key->dst) ||
7886 ipv6_addr_loopback(&match.key->src)) {
7887 dev_err(&pf->pdev->dev,
7888 "Bad ipv6, addr is LOOPBACK\n");
7889 return I40E_ERR_CONFIG;
7891 if (!ipv6_addr_any(&match.mask->dst) ||
7892 !ipv6_addr_any(&match.mask->src))
7893 field_flags |= I40E_CLOUD_FIELD_IIP;
7895 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
7896 sizeof(filter->src_ipv6));
7897 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
7898 sizeof(filter->dst_ipv6));
7901 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
7902 struct flow_match_ports match;
7904 flow_rule_match_ports(rule, &match);
7905 if (match.mask->src) {
7906 if (match.mask->src == cpu_to_be16(0xffff)) {
7907 field_flags |= I40E_CLOUD_FIELD_IIP;
7909 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
7910 be16_to_cpu(match.mask->src));
7911 return I40E_ERR_CONFIG;
7915 if (match.mask->dst) {
7916 if (match.mask->dst == cpu_to_be16(0xffff)) {
7917 field_flags |= I40E_CLOUD_FIELD_IIP;
7919 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
7920 be16_to_cpu(match.mask->dst));
7921 return I40E_ERR_CONFIG;
7925 filter->dst_port = match.key->dst;
7926 filter->src_port = match.key->src;
7928 switch (filter->ip_proto) {
7933 dev_err(&pf->pdev->dev,
7934 "Only UDP and TCP transport are supported\n");
7938 filter->flags = field_flags;
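/* Minimal usage sketch that exercises the parser above (assumed
 * tc-flower syntax; names and values are placeholders):
 *
 *   tc filter add dev eth0 protocol ip ingress flower \
 *      dst_ip 192.168.1.1 ip_proto tcp dst_port 80 skip_sw hw_tc 1
 *
 * dst_ip and dst_port land in filter->dst_ipv4/dst_port, ip_proto in
 * filter->ip_proto, and hw_tc selects the traffic class resolved by
 * i40e_handle_tclass() below.
 */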
7943 * i40e_handle_tclass - Forward to a traffic class on the device
7944 * @vsi: Pointer to VSI
7945 * @tc: traffic class index on the device
7946 * @filter: Pointer to cloud filter structure
7949 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
7950 struct i40e_cloud_filter *filter)
7952 struct i40e_channel *ch, *ch_tmp;
7954 /* direct to a traffic class on the same device */
7956 filter->seid = vsi->seid;
7958 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
7959 if (!filter->dst_port) {
7960 dev_err(&vsi->back->pdev->dev,
7961 "Specify destination port to direct to traffic class that is not default\n");
7964 if (list_empty(&vsi->ch_list))
7966 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
7968 if (ch->seid == vsi->tc_seid_map[tc])
7969 filter->seid = ch->seid;
7973 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
7978 * i40e_configure_clsflower - Configure tc flower filters
7979 * @vsi: Pointer to VSI
7980 * @cls_flower: Pointer to struct flow_cls_offload
7983 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
7984 struct flow_cls_offload *cls_flower)
7986 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
7987 struct i40e_cloud_filter *filter = NULL;
7988 struct i40e_pf *pf = vsi->back;
7992 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
7996 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
7997 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
8000 if (pf->fdir_pf_active_filters ||
8001 (!hlist_empty(&pf->fdir_filter_list))) {
8002 dev_err(&vsi->back->pdev->dev,
8003 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
8007 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
8008 dev_err(&vsi->back->pdev->dev,
8009 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
8010 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8011 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8014 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
8018 filter->cookie = cls_flower->cookie;
8020 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
8024 err = i40e_handle_tclass(vsi, tc, filter);
8028 /* Add cloud filter */
8029 if (filter->dst_port)
8030 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
8032 err = i40e_add_del_cloud_filter(vsi, filter, true);
8035 dev_err(&pf->pdev->dev,
8036 "Failed to add cloud filter, err %s\n",
8037 i40e_stat_str(&pf->hw, err));
8041 /* add filter to the ordered list */
8042 INIT_HLIST_NODE(&filter->cloud_node);
8044 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
8046 pf->num_cloud_filters++;
8055 * i40e_find_cloud_filter - Find the cloud filter in the list
8056 * @vsi: Pointer to VSI
8057 * @cookie: filter specific cookie
8060 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8061 unsigned long *cookie)
8063 struct i40e_cloud_filter *filter = NULL;
8064 struct hlist_node *node2;
8066 hlist_for_each_entry_safe(filter, node2,
8067 &vsi->back->cloud_filter_list, cloud_node)
8068 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
8074 * i40e_delete_clsflower - Remove tc flower filters
8075 * @vsi: Pointer to VSI
8076 * @cls_flower: Pointer to struct flow_cls_offload
8079 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8080 struct flow_cls_offload *cls_flower)
8082 struct i40e_cloud_filter *filter = NULL;
8083 struct i40e_pf *pf = vsi->back;
8086 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
8091 hash_del(&filter->cloud_node);
8093 if (filter->dst_port)
8094 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
8096 err = i40e_add_del_cloud_filter(vsi, filter, false);
8100 dev_err(&pf->pdev->dev,
8101 "Failed to delete cloud filter, err %s\n",
8102 i40e_stat_str(&pf->hw, err));
8103 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
8106 pf->num_cloud_filters--;
8107 if (!pf->num_cloud_filters)
8108 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8109 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8110 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8111 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8112 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8118 * i40e_setup_tc_cls_flower - flower classifier offloads
8119 * @netdev: net device to configure
8120 * @type_data: offload data
8122 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8123 struct flow_cls_offload *cls_flower)
8125 struct i40e_vsi *vsi = np->vsi;
8127 switch (cls_flower->command) {
8128 case FLOW_CLS_REPLACE:
8129 return i40e_configure_clsflower(vsi, cls_flower);
8130 case FLOW_CLS_DESTROY:
8131 return i40e_delete_clsflower(vsi, cls_flower);
8132 case FLOW_CLS_STATS:
8139 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8142 struct i40e_netdev_priv *np = cb_priv;
8144 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8148 case TC_SETUP_CLSFLOWER:
8149 return i40e_setup_tc_cls_flower(np, type_data);
8156 static LIST_HEAD(i40e_block_cb_list);
8158 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8161 struct i40e_netdev_priv *np = netdev_priv(netdev);
8164 case TC_SETUP_QDISC_MQPRIO:
8165 return i40e_setup_tc(netdev, type_data);
8166 case TC_SETUP_BLOCK:
8167 return flow_block_cb_setup_simple(type_data,
8168 &i40e_block_cb_list,
8169 i40e_setup_tc_block_cb,
8177 * i40e_open - Called when a network interface is made active
8178 * @netdev: network interface device structure
8180 * The open entry point is called when a network interface is made
8181 * active by the system (IFF_UP). At this point all resources needed
8182 * for transmit and receive operations are allocated, the interrupt
8183 * handler is registered with the OS, the netdev watchdog subtask is
8184 * enabled, and the stack is notified that the interface is ready.
8186 * Returns 0 on success, negative value on failure
8188 int i40e_open(struct net_device *netdev)
8190 struct i40e_netdev_priv *np = netdev_priv(netdev);
8191 struct i40e_vsi *vsi = np->vsi;
8192 struct i40e_pf *pf = vsi->back;
8195 /* disallow open during test or if eeprom is broken */
8196 if (test_bit(__I40E_TESTING, pf->state) ||
8197 test_bit(__I40E_BAD_EEPROM, pf->state))
8200 netif_carrier_off(netdev);
8202 if (i40e_force_link_state(pf, true))
8205 err = i40e_vsi_open(vsi);
8209 /* configure global TSO hardware offload settings */
8210 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
8211 TCP_FLAG_FIN) >> 16);
8212 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
8214 TCP_FLAG_CWR) >> 16);
8215 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
8217 udp_tunnel_get_rx_info(netdev);
8224 * @vsi: the VSI to open
8226 * Finish initialization of the VSI.
8228 * Returns 0 on success, negative value on failure
8230 * Note: expects to be called while under rtnl_lock()
8232 int i40e_vsi_open(struct i40e_vsi *vsi)
8234 struct i40e_pf *pf = vsi->back;
8235 char int_name[I40E_INT_NAME_STR_LEN];
8238 /* allocate descriptors */
8239 err = i40e_vsi_setup_tx_resources(vsi);
8242 err = i40e_vsi_setup_rx_resources(vsi);
8246 err = i40e_vsi_configure(vsi);
8251 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
8252 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
8253 err = i40e_vsi_request_irq(vsi, int_name);
8257 /* Notify the stack of the actual queue counts. */
8258 err = netif_set_real_num_tx_queues(vsi->netdev,
8259 vsi->num_queue_pairs);
8261 goto err_set_queues;
8263 err = netif_set_real_num_rx_queues(vsi->netdev,
8264 vsi->num_queue_pairs);
8266 goto err_set_queues;
8268 } else if (vsi->type == I40E_VSI_FDIR) {
8269 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
8270 dev_driver_string(&pf->pdev->dev),
8271 dev_name(&pf->pdev->dev));
8272 err = i40e_vsi_request_irq(vsi, int_name);
8279 err = i40e_up_complete(vsi);
8281 goto err_up_complete;
8288 i40e_vsi_free_irq(vsi);
8290 i40e_vsi_free_rx_resources(vsi);
8292 i40e_vsi_free_tx_resources(vsi);
8293 if (vsi == pf->vsi[pf->lan_vsi])
8294 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
8300 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
8301 * @pf: Pointer to PF
8303 * This function destroys the hlist where all the Flow Director
8304 * filters were saved.
8306 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
8308 struct i40e_fdir_filter *filter;
8309 struct i40e_flex_pit *pit_entry, *tmp;
8310 struct hlist_node *node2;
8312 hlist_for_each_entry_safe(filter, node2,
8313 &pf->fdir_filter_list, fdir_node) {
8314 hlist_del(&filter->fdir_node);
8318 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
8319 list_del(&pit_entry->list);
8322 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
8324 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
8325 list_del(&pit_entry->list);
8328 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
8330 pf->fdir_pf_active_filters = 0;
8331 pf->fd_tcp4_filter_cnt = 0;
8332 pf->fd_udp4_filter_cnt = 0;
8333 pf->fd_sctp4_filter_cnt = 0;
8334 pf->fd_ip4_filter_cnt = 0;
8336 /* Reprogram the default input set for TCP/IPv4 */
8337 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8338 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8339 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8341 /* Reprogram the default input set for UDP/IPv4 */
8342 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
8343 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8344 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8346 /* Reprogram the default input set for SCTP/IPv4 */
8347 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
8348 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8349 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8351 /* Reprogram the default input set for Other/IPv4 */
8352 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
8353 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8355 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
8356 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8360 * i40e_cloud_filter_exit - Cleans up the cloud filters
8361 * @pf: Pointer to PF
8363 * This function destroys the hlist where all the cloud filters were saved.
8366 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
8368 struct i40e_cloud_filter *cfilter;
8369 struct hlist_node *node;
8371 hlist_for_each_entry_safe(cfilter, node,
8372 &pf->cloud_filter_list, cloud_node) {
8373 hlist_del(&cfilter->cloud_node);
8376 pf->num_cloud_filters = 0;
8378 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8379 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8380 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8381 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8382 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8387 * i40e_close - Disables a network interface
8388 * @netdev: network interface device structure
8390 * The close entry point is called when an interface is de-activated
8391 * by the OS. The hardware is still under the driver's control, but
8392 * this netdev interface is disabled.
8394 * Returns 0, this is not allowed to fail
8396 int i40e_close(struct net_device *netdev)
8398 struct i40e_netdev_priv *np = netdev_priv(netdev);
8399 struct i40e_vsi *vsi = np->vsi;
8401 i40e_vsi_close(vsi);
8407 * i40e_do_reset - Start a PF or Core Reset sequence
8408 * @pf: board private structure
8409 * @reset_flags: which reset is requested
8410 * @lock_acquired: indicates whether or not the lock has been acquired
8411 * before this function was called.
8413 * The essential difference in resets is that the PF Reset
8414 * doesn't clear the packet buffers, doesn't reset the PE
8415 * firmware, and doesn't bother the other PFs on the chip.
8417 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
8421 WARN_ON(in_interrupt());
8424 /* do the biggest reset indicated */
8425 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
8427 /* Request a Global Reset
8429 * This will start the chip's countdown to the actual full
8430 * chip reset event, and cause a warning interrupt to be sent
8431 * to all PFs, including the requestor. Our handler
8432 * for the warning interrupt will deal with the shutdown
8433 * and recovery of the switch setup.
8435 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
8436 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8437 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
8438 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8440 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
8442 /* Request a Core Reset
8444 * Same as Global Reset, except does *not* include the MAC/PHY
8446 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
8447 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8448 val |= I40E_GLGEN_RTRIG_CORER_MASK;
8449 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8450 i40e_flush(&pf->hw);
8452 } else if (reset_flags & I40E_PF_RESET_FLAG) {
8454 /* Request a PF Reset
8456 * Resets only the PF-specific registers
8458 * This goes directly to the tear-down and rebuild of
8459 * the switch, since we need to do all the recovery as
8460 * for the Core Reset.
8462 dev_dbg(&pf->pdev->dev, "PFR requested\n");
8463 i40e_handle_reset_warning(pf, lock_acquired);
8465 dev_info(&pf->pdev->dev,
8466 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
8467 "FW LLDP is disabled\n" :
8468 "FW LLDP is enabled\n");
8470 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
8473 /* Find the VSI(s) that requested a re-init */
8474 dev_info(&pf->pdev->dev,
8475 "VSI reinit requested\n");
8476 for (v = 0; v < pf->num_alloc_vsi; v++) {
8477 struct i40e_vsi *vsi = pf->vsi[v];
8480 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
8482 i40e_vsi_reinit_locked(pf->vsi[v]);
8484 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
8488 /* Find the VSI(s) that need to be brought down */
8488 dev_info(&pf->pdev->dev, "VSI down requested\n");
8489 for (v = 0; v < pf->num_alloc_vsi; v++) {
8490 struct i40e_vsi *vsi = pf->vsi[v];
8493 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
8495 set_bit(__I40E_VSI_DOWN, vsi->state);
8500 dev_info(&pf->pdev->dev,
8501 "bad reset request 0x%08x\n", reset_flags);
8505 #ifdef CONFIG_I40E_DCB
8507 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
8508 * @pf: board private structure
8509 * @old_cfg: current DCB config
8510 * @new_cfg: new DCB config
8512 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
8513 struct i40e_dcbx_config *old_cfg,
8514 struct i40e_dcbx_config *new_cfg)
8516 bool need_reconfig = false;
8518 /* Check if ETS configuration has changed */
8519 if (memcmp(&new_cfg->etscfg,
8521 sizeof(new_cfg->etscfg))) {
8522 /* If Priority Table has changed reconfig is needed */
8523 if (memcmp(&new_cfg->etscfg.prioritytable,
8524 &old_cfg->etscfg.prioritytable,
8525 sizeof(new_cfg->etscfg.prioritytable))) {
8526 need_reconfig = true;
8527 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
8530 if (memcmp(&new_cfg->etscfg.tcbwtable,
8531 &old_cfg->etscfg.tcbwtable,
8532 sizeof(new_cfg->etscfg.tcbwtable)))
8533 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
8535 if (memcmp(&new_cfg->etscfg.tsatable,
8536 &old_cfg->etscfg.tsatable,
8537 sizeof(new_cfg->etscfg.tsatable)))
8538 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
8541 /* Check if PFC configuration has changed */
8542 if (memcmp(&new_cfg->pfc,
8544 sizeof(new_cfg->pfc))) {
8545 need_reconfig = true;
8546 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
8549 /* Check if APP Table has changed */
8550 if (memcmp(&new_cfg->app,
8552 sizeof(new_cfg->app))) {
8553 need_reconfig = true;
8554 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
8557 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
8558 return need_reconfig;
8562 * i40e_handle_lldp_event - Handle LLDP Change MIB event
8563 * @pf: board private structure
8564 * @e: event info posted on ARQ
8566 static int i40e_handle_lldp_event(struct i40e_pf *pf,
8567 struct i40e_arq_event_info *e)
8569 struct i40e_aqc_lldp_get_mib *mib =
8570 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
8571 struct i40e_hw *hw = &pf->hw;
8572 struct i40e_dcbx_config tmp_dcbx_cfg;
8573 bool need_reconfig = false;
8577 /* Not DCB capable or capability disabled */
8578 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
8581 /* Ignore if event is not for Nearest Bridge */
8582 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
8583 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
8584 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
8585 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
8588 /* Check MIB Type and return if event is for a Remote MIB update */
8589 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
8590 dev_dbg(&pf->pdev->dev,
8591 "LLDP event mib type %s\n", type ? "remote" : "local");
8592 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
8593 /* Update the remote cached instance and return */
8594 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
8595 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
8596 &hw->remote_dcbx_config);
8600 /* Store the old configuration */
8601 tmp_dcbx_cfg = hw->local_dcbx_config;
8603 /* Reset the old DCBx configuration data */
8604 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
8605 /* Get updated DCBX data from firmware */
8606 ret = i40e_get_dcb_config(&pf->hw);
8608 dev_info(&pf->pdev->dev,
8609 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
8610 i40e_stat_str(&pf->hw, ret),
8611 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8615 /* No change detected in DCBX configs */
8616 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
8617 sizeof(tmp_dcbx_cfg))) {
8618 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
8622 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
8623 &hw->local_dcbx_config);
8625 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
8630 /* Enable DCB tagging only when more than one TC */
8631 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
8632 pf->flags |= I40E_FLAG_DCB_ENABLED;
8634 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8636 set_bit(__I40E_PORT_SUSPENDED, pf->state);
8637 /* Reconfiguration needed quiesce all VSIs */
8638 i40e_pf_quiesce_all_vsi(pf);
8640 /* Changes in configuration update VEB/VSI */
8641 i40e_dcb_reconfigure(pf);
8643 ret = i40e_resume_port_tx(pf);
8645 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
8646 /* In case of error no point in resuming VSIs */
8650 /* Wait for the PF's queues to be disabled */
8651 ret = i40e_pf_wait_queues_disabled(pf);
8653 /* Schedule PF reset to recover */
8654 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8655 i40e_service_event_schedule(pf);
8657 i40e_pf_unquiesce_all_vsi(pf);
8658 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
8659 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
8665 #endif /* CONFIG_I40E_DCB */
/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags, true);
	rtnl_unlock();
}

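/* A minimal usage sketch (illustrative only, not called by the driver):
 * lockless callers such as the ethtool paths request a reset by passing one
 * of the __I40E_*_REQUESTED state bits, encoded as a flag, through the safe
 * wrapper above, which serializes against netdev state via rtnl_lock.
 */
static void __maybe_unused i40e_example_request_pf_reset(struct i40e_pf *pf)
{
	/* ask for a PF reset under the rtnl lock */
	i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
}
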
/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VFs.
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}

/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/
u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
	return fcnt_prog;
}

/**
 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
 * @pf: board private structure
 **/
u32 i40e_get_current_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}

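/* Worked example (illustrative): both counters above are packed into the
 * single I40E_PFQF_FDSTAT register, so a raw value whose guaranteed field
 * reads 0x10 and whose best-effort field reads 0x20 decodes to 48 programmed
 * filters for this PF. A hypothetical helper showing the same decode on a
 * captured register value:
 */
static u32 __maybe_unused i40e_example_decode_fdstat(u32 val)
{
	/* guaranteed count sits in the low field, best-effort above it */
	return (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
	       ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
}
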
/**
 * i40e_get_global_fd_count - Get total FD filters programmed on device
 * @pf: board private structure
 **/
u32 i40e_get_global_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_reenable_fdir_sb - Restore FDir SB capability
 * @pf: board private structure
 **/
static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
{
	if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}

/**
 * i40e_reenable_fdir_atr - Restore FDir ATR capability
 * @pf: board private structure
 **/
static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
{
	if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
		/* ATR uses the same filtering logic as SB rules. It only
		 * functions properly if the input set mask is at the default
		 * settings. It is safe to restore the default input set
		 * because there are no active TCPv4 filter rules.
		 */
		i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
					I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
					I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
	}
}

/**
 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
 * @pf: board private structure
 * @filter: FDir filter to remove
 **/
static void i40e_delete_invalid_filter(struct i40e_pf *pf,
				       struct i40e_fdir_filter *filter)
{
	/* Update counters */
	pf->fdir_pf_active_filters--;
	pf->fd_inv = 0;

	switch (filter->flow_type) {
	case TCP_V4_FLOW:
		pf->fd_tcp4_filter_cnt--;
		break;
	case UDP_V4_FLOW:
		pf->fd_udp4_filter_cnt--;
		break;
	case SCTP_V4_FLOW:
		pf->fd_sctp4_filter_cnt--;
		break;
	case IP_USER_FLOW:
		switch (filter->ip4_proto) {
		case IPPROTO_TCP:
			pf->fd_tcp4_filter_cnt--;
			break;
		case IPPROTO_UDP:
			pf->fd_udp4_filter_cnt--;
			break;
		case IPPROTO_SCTP:
			pf->fd_sctp4_filter_cnt--;
			break;
		case IPPROTO_IP:
			pf->fd_ip4_filter_cnt--;
			break;
		}
		break;
	}

	/* Remove the filter from the list and free memory */
	hlist_del(&filter->fdir_node);
	kfree(filter);
}

/**
 * i40e_fdir_check_and_reenable - Function to reenable FD ATR or SB if disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		return;

	/* Check if we have enough room to re-enable FDir SB capability. */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
		i40e_reenable_fdir_sb(pf);

	/* We should wait for even more space before re-enabling ATR.
	 * Additionally, we cannot enable ATR as long as we still have TCP SB
	 * rules we have stored.
	 */
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
	    (pf->fd_tcp4_filter_cnt == 0))
		i40e_reenable_fdir_atr(pf);

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node)
			if (filter->fd_id == pf->fd_inv)
				i40e_delete_invalid_filter(pf, filter);
	}
}

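/* Illustrative sketch: the two headroom constants above stage the recovery,
 * sideband re-enables first at the smaller I40E_FDIR_BUFFER_HEAD_ROOM and
 * ATR only at the larger I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR, and ATR also
 * requires no TCPv4 sideband rules. A hypothetical predicate capturing the
 * ATR condition (the real check lives inline above):
 */
static bool __maybe_unused i40e_example_atr_has_room(struct i40e_pf *pf)
{
	u32 fcnt_prog = i40e_get_global_fd_count(pf);
	u32 fcnt_avail = pf->fdir_pf_filter_count;

	return (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
	       (pf->fd_tcp4_filter_cnt == 0);
}
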
#define I40E_MIN_FD_FLUSH_INTERVAL 10
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	if (!time_after(jiffies, pf->fd_flush_timestamp +
				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If the flush is happening too quickly and we have mostly SB rules,
	 * we should not re-enable ATR for some time.
	 */
	min_flush_time = pf->fd_flush_timestamp +
			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

	if (!(time_after(jiffies, min_flush_time)) &&
	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
		disable_atr = true;
	}

	pf->fd_flush_timestamp = jiffies;
	set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
	/* flush all filters */
	wr32(&pf->hw, I40E_PFQF_CTL_1,
	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	i40e_flush(&pf->hw);
	pf->fd_flush_cnt++;
	pf->fd_add_err = 0;
	do {
		/* Check FD flush status every 5-6msec */
		usleep_range(5000, 6000);
		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	} while (flush_wait_retry--);
	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
		if (!disable_atr && !pf->fd_tcp4_filter_cnt)
			clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
		clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}
}

/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 **/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}

/* We can see up to 256 filter programming descriptors in transit if the
 * filters are being applied really fast before we see the first filter miss
 * error on Rx queue 0. Accumulating enough error messages before reacting
 * makes sure we don't trigger a flush too often.
 */
#define I40E_MAX_FD_PROGRAM_ERROR 256

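/* Illustrative sketch of how a threshold like this is typically consumed:
 * the hot path counts programming errors and only requests a table flush
 * once the count crosses I40E_MAX_FD_PROGRAM_ERROR. This hypothetical
 * helper is a sketch only; the real accounting lives in the Tx clean-up
 * path, not here.
 */
static void __maybe_unused i40e_example_note_fd_program_error(struct i40e_pf *pf)
{
	if (++pf->fd_add_err > I40E_MAX_FD_PROGRAM_ERROR)
		set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
}
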
/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/
static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, pf->state))
		return;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		i40e_fdir_flush_and_replay(pf);

	i40e_fdir_check_and_reenable(pf);
}

/**
 * i40e_vsi_link_event - notify VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		if (!vsi->netdev || !vsi->netdev_registered)
			break;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
		break;

	case I40E_VSI_SRIOV:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_CTRL:
	case I40E_VSI_IWARP:
	case I40E_VSI_MIRROR:
	default:
		/* there is no notification for other VSIs */
		break;
	}
}

/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
	struct i40e_pf *pf;
	int i;

	if (!veb || !veb->pf)
		return;
	pf = veb->pf;

	/* depth first... */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
			i40e_veb_link_event(pf->veb[i], link_up);

	/* ... now the local VSIs */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
			i40e_vsi_link_event(pf->vsi[i], link_up);
}

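/* Illustrative sketch: the same parent-link walk used above (matching each
 * element's uplink_seid against the VEB's seid) can count everything hanging
 * below a VEB. Hypothetical helper, not used by the driver:
 */
static int __maybe_unused i40e_example_count_veb_children(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	int i, count = 0;

	/* child VEBs first, recursing just like the link-event walk */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i] && pf->veb[i]->uplink_seid == veb->seid)
			count += 1 + i40e_example_count_veb_children(pf->veb[i]);

	/* then the VSIs attached directly to this VEB */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid)
			count++;

	return count;
}
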
/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 new_link_speed, old_link_speed;
	i40e_status status;
	bool new_link, old_link;

	/* set this to force the get_link_status call to refresh state */
	pf->hw.phy.get_link_info = true;
	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
	status = i40e_get_link_status(&pf->hw, &new_link);

	/* On success, disable temp link polling */
	if (status == I40E_SUCCESS) {
		clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
	} else {
		/* Enable link polling temporarily until i40e_get_link_status
		 * returns I40E_SUCCESS
		 */
		set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
			status);
		return;
	}

	old_link_speed = pf->hw.phy.link_info_old.link_speed;
	new_link_speed = pf->hw.phy.link_info.link_speed;

	if (new_link == old_link &&
	    new_link_speed == old_link_speed &&
	    (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	     new_link == netif_carrier_ok(vsi->netdev)))
		return;

	i40e_print_link_message(vsi, new_link);

	/* Notify the base of the switch tree connected to
	 * the link.  Floating VEBs are not notified.
	 */
	if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(vsi, new_link);

	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}

/**
 * i40e_watchdog_subtask - periodic checks not using event driven response
 * @pf: board private structure
 **/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies, (pf->service_timer_previous +
				  pf->service_timer_period)))
		return;
	pf->service_timer_previous = jiffies;

	if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
	    test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
		i40e_link_event(pf);

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			i40e_update_stats(pf->vsi[i]);

	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
		/* Update the stats for the active switching components */
		for (i = 0; i < I40E_MAX_VEB; i++)
			if (pf->veb[i])
				i40e_update_veb_stats(pf->veb[i]);
	}

	i40e_ptp_rx_hang(pf);
	i40e_ptp_tx_hang(pf);
}

/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

	if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
		clear_bit(__I40E_DOWN_REQUESTED, pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
		i40e_prep_for_reset(pf, false);
		i40e_reset(pf);
		i40e_rebuild(pf, false, false);
	}

	/* If we're already down or resetting, just bail */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
		i40e_do_reset(pf, reset_flags, false);
	}
}

/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static void i40e_handle_link_event(struct i40e_pf *pf,
				   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_get_link_status *status =
		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Do a new status request to re-enable LSE reporting
	 * and load new status information into the hw struct.
	 * This completely ignores any state information
	 * in the ARQ event info, instead choosing to always
	 * issue the AQ update link status command.
	 */
	i40e_link_event(pf);

	/* Check if module meets thermal requirements */
	if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
		dev_err(&pf->pdev->dev,
			"Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
		dev_err(&pf->pdev->dev,
			"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
	} else {
		/* check for unqualified module; if link is down, suppress
		 * the message if link was forced to be down.
		 */
		if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
		    (!(status->link_info & I40E_AQ_LINK_UP)) &&
		    (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
			dev_err(&pf->pdev->dev,
				"Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
			dev_err(&pf->pdev->dev,
				"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		}
	}
}

/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	/* Do not run clean AQ when PF reset fails */
	if (test_bit(__I40E_RESET_FAILED, pf->state))
		return;

	/* check for error indications */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
		pf->arq_overflows++;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_len);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		case i40e_aqc_opc_nvm_erase:
		case i40e_aqc_opc_nvm_update:
		case i40e_aqc_opc_oem_post_update:
			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
				   "ARQ NVM operation 0x%04x completed\n",
				   opcode);
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ: Unknown event 0x%04x ignored\n",
				 opcode);
			break;
		}
	} while (i++ < pf->adminq_work_limit);

	if (i < pf->adminq_work_limit)
		clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);

	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	kfree(event.msg_buf);
}

/**
 * i40e_verify_eeprom - make sure eeprom is good to use
 * @pf: board private structure
 **/
static void i40e_verify_eeprom(struct i40e_pf *pf)
{
	int err;

	err = i40e_diag_eeprom_test(&pf->hw);
	if (err) {
		/* retry in case of garbage read */
		err = i40e_diag_eeprom_test(&pf->hw);
		if (err) {
			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
				 err);
			set_bit(__I40E_BAD_EEPROM, pf->state);
		}
	}

	if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
		clear_bit(__I40E_BAD_EEPROM, pf->state);
	}
}

/**
 * i40e_enable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * enable switch loop back or die - no point in a return value
 **/
static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "update vsi switch failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}
}

/**
 * i40e_disable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * disable switch loop back or die - no point in a return value
 **/
static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "update vsi switch failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}
}

/**
 * i40e_config_bridge_mode - Configure the HW bridge mode
 * @veb: pointer to the bridge instance
 *
 * Configure the loop back mode for the LAN VSI that is downlink to the
 * specified HW bridge instance. It is expected this function is called
 * when a new HW bridge is instantiated.
 **/
static void i40e_config_bridge_mode(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;

	if (pf->hw.debug_mask & I40E_DEBUG_LAN)
		dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
			 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		i40e_disable_pf_switch_lb(pf);
	else
		i40e_enable_pf_switch_lb(pf);
}

/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of veb_idx %d owner VSI failed: %d\n",
			 veb->idx, ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		veb->bridge_mode = BRIDGE_MODE_VEB;
	else
		veb->bridge_mode = BRIDGE_MODE_VEPA;
	i40e_config_bridge_mode(veb);

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];

			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}

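/* Illustrative note: the reconstitution above keys on the driver-private
 * veb->idx/veb_idx indices rather than on SEIDs, because firmware hands out
 * fresh SEIDs when elements are re-created after a reset. A hypothetical
 * top-level caller (not used by the driver) would kick off the recursion
 * from whichever VEB hangs off the MAC:
 */
static void __maybe_unused i40e_example_reconstitute_from_mac(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < I40E_MAX_VEB; v++)
		if (pf->veb[v] && pf->veb[v]->uplink_seid == pf->mac_seid)
			i40e_reconstitute_veb(pf->veb[v]);
}
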
/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 * @list_type: AQ capability to be queried
 **/
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
						    &data_size, list_type,
						    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENODEV;
		}
	} while (err);

	if (pf->hw.debug_mask & I40E_DEBUG_USER) {
		if (list_type == i40e_aqc_opc_list_func_capabilities) {
			dev_info(&pf->pdev->dev,
				 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
				 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
				 pf->hw.func_caps.num_msix_vectors,
				 pf->hw.func_caps.num_msix_vectors_vf,
				 pf->hw.func_caps.fd_filters_guaranteed,
				 pf->hw.func_caps.fd_filters_best_effort,
				 pf->hw.func_caps.num_tx_qp,
				 pf->hw.func_caps.num_vsis);
		} else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
			dev_info(&pf->pdev->dev,
				 "switch_mode=0x%04x, function_valid=0x%08x\n",
				 pf->hw.dev_caps.switch_mode,
				 pf->hw.dev_caps.valid_functions);
			dev_info(&pf->pdev->dev,
				 "SR-IOV=%d, num_vfs for all function=%u\n",
				 pf->hw.dev_caps.sr_iov_1_1,
				 pf->hw.dev_caps.num_vfs);
			dev_info(&pf->pdev->dev,
				 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
				 pf->hw.dev_caps.num_vsis,
				 pf->hw.dev_caps.num_rx_qp,
				 pf->hw.dev_caps.num_tx_qp);
		}
	}
	if (list_type == i40e_aqc_opc_list_func_capabilities) {
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		       + pf->hw.func_caps.num_vfs)
		if (pf->hw.revision_id == 0 &&
		    pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
			dev_info(&pf->pdev->dev,
				 "got num_vsis %d, setting num_vsis to %d\n",
				 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
			pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
		}
	}
	return 0;
}

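/* Illustrative sketch of the grow-and-retry pattern used above for AQ
 * responses of unknown size: allocate an initial guess, let firmware report
 * the required size through I40E_AQ_RC_ENOMEM, and loop with the new length.
 * Hypothetical stand-alone helper that applies the same pattern to the
 * device-level capability list:
 */
static int __maybe_unused i40e_example_discover_dev_caps(struct i40e_pf *pf)
{
	int buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	u16 data_size;
	void *buf;
	int err;

	do {
		buf = kzalloc(buf_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = i40e_aq_discover_capabilities(&pf->hw, buf, buf_len,
						    &data_size,
						    i40e_aqc_opc_list_dev_capabilities,
						    NULL);
		kfree(buf);
		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM)
			buf_len = data_size; /* firmware told us how much */
		else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK)
			return -ENODEV;
	} while (err);

	return 0;
}
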
static int i40e_vsi_clear(struct i40e_vsi *vsi);

/**
 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
 * @pf: board private structure
 **/
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;

	/* quick workaround for an NVM issue that leaves a critical register
	 * uninitialized
	 */
	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
		static const u32 hkey[] = {
			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
			0x95b3a76d};
		int i;

		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
	}

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* find existing VSI and see if it needs configuring */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);

	/* create a new VSI if none exists */
	if (!vsi) {
		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
				     pf->vsi[pf->lan_vsi]->seid, 0);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			return;
		}
	}

	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}

/**
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 **/
static void i40e_fdir_teardown(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;

	i40e_fdir_filter_exit(pf);
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (vsi)
		i40e_vsi_release(vsi);
}

/**
 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
 * @vsi: PF main vsi
 * @seid: seid of main or channel VSIs
 *
 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
 * existed before reset
 **/
static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
{
	struct i40e_cloud_filter *cfilter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;
	i40e_status ret;

	/* Add cloud filters back if they exist */
	hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
				  cloud_node) {
		if (cfilter->seid != seid)
			continue;

		if (cfilter->dst_port)
			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
								true);
		else
			ret = i40e_add_del_cloud_filter(vsi, cfilter, true);

		if (ret) {
			dev_dbg(&pf->pdev->dev,
				"Failed to rebuild cloud filter, err %s aq_err %s\n",
				i40e_stat_str(&pf->hw, ret),
				i40e_aq_str(&pf->hw,
					    pf->hw.aq.asq_last_status));
			return ret;
		}
	}
	return 0;
}

/**
 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
 * @vsi: PF main vsi
 *
 * Rebuilds channel VSIs if they existed before reset
 **/
static int i40e_rebuild_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	i40e_status ret;

	if (list_empty(&vsi->ch_list))
		return 0;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (!ch->initialized)
			break;
		/* Proceed with creation of channel (VMDq2) VSI */
		ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "failed to rebuild channels using uplink_seid %u\n",
				 vsi->uplink_seid);
			return ret;
		}
		/* Reconfigure TX queues using QTX_CTL register */
		ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "failed to configure TX rings for channel %u\n",
				 ch->seid);
			return ret;
		}
		/* update 'next_base_queue' */
		vsi->next_base_queue = vsi->next_base_queue +
							ch->num_queue_pairs;
		if (ch->max_tx_rate) {
			u64 credits = ch->max_tx_rate;

			if (i40e_set_bw_limit(vsi, ch->seid,
					      ch->max_tx_rate))
				return -EINVAL;

			do_div(credits, I40E_BW_CREDIT_DIVISOR);
			dev_dbg(&vsi->back->pdev->dev,
				"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
				ch->max_tx_rate,
				credits,
				ch->seid);
		}
		ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
		if (ret) {
			dev_dbg(&vsi->back->pdev->dev,
				"Failed to rebuild cloud filters for channel VSI %u\n",
				ch->seid);
			return ret;
		}
	}
	return 0;
}

/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	/* pf_quiesce_all_vsi modifies netdev structures - rtnl_lock needed */
	if (!lock_acquired)
		rtnl_lock();
	i40e_pf_quiesce_all_vsi(pf);
	if (!lock_acquired)
		rtnl_unlock();

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}

	/* Save the current PTP time so that we can restore the time after the
	 * reset completes.
	 */
	i40e_ptp_save_hw_time(pf);
}

/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 **/
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}

/**
 * i40e_get_oem_version - get OEM specific version information
 * @hw: pointer to the hardware structure
 **/
static void i40e_get_oem_version(struct i40e_hw *hw)
{
	u16 block_offset = 0xffff;
	u16 block_length = 0;
	u16 capabilities = 0;
	u16 gen_snap = 0;
	u16 release = 0;

#define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
#define I40E_NVM_OEM_LENGTH_OFFSET		0x00
#define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
#define I40E_NVM_OEM_GEN_OFFSET			0x02
#define I40E_NVM_OEM_RELEASE_OFFSET		0x03
#define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
#define I40E_NVM_OEM_LENGTH			3

	/* Check if pointer to OEM version block is valid. */
	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
	if (block_offset == 0xffff)
		return;

	/* Check if OEM version block has correct length. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
			   &block_length);
	if (block_length < I40E_NVM_OEM_LENGTH)
		return;

	/* Check if OEM version format is as expected. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
			   &capabilities);
	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
		return;

	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
			   &gen_snap);
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
			   &release);
	hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
}

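/* Worked example (illustrative): if the NVM words read above yield
 * gen_snap = 0x0102 and release = 0x0304, the composed value is
 * oem_ver = (0x0102 << I40E_OEM_SNAP_SHIFT) | 0x0304, i.e. the gen/snap pair
 * lands in the upper half of the 32-bit version and the release number in
 * the lower half.
 */
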
/**
 * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
 * @pf: board private structure
 **/
static int i40e_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;

	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, pf->state);
		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	} else {
		pf->pfr_count++;
	}
	return ret;
}

/**
 * i40e_rebuild - rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
	int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	u8 set_fc_aq_fail = 0;
	i40e_status ret;
	u32 val;
	int v;

	if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
	    i40e_check_recovery_mode(pf)) {
		i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
	}

	if (test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !old_recovery_mode_bit)
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
9945 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
9947 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
9948 ret = i40e_init_adminq(&pf->hw);
9950 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
9951 i40e_stat_str(&pf->hw, ret),
9952 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9953 goto clear_recovery;
9955 i40e_get_oem_version(&pf->hw);
9957 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
9958 ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
9959 hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
9960 /* The following delay is necessary for 4.33 firmware and older
9961 * to recover after EMP reset. 200 ms should suffice but we
9962 * put here 300 ms to be sure that FW is ready to operate
9968 /* re-verify the eeprom if we just had an EMP reset */
9969 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
9970 i40e_verify_eeprom(pf);
	/* if we are going out of or into recovery mode we have to act
	 * accordingly with regard to resources initialization
	 * and deinitialization
	 */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
	    old_recovery_mode_bit) {
		if (i40e_get_capabilities(pf,
					  i40e_aqc_opc_list_func_capabilities))
			goto end_unlock;

		if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
			/* we're staying in recovery mode so we'll reinitialize
			 * misc vector here
			 */
			if (i40e_setup_misc_vector_for_recovery_mode(pf))
				goto end_unlock;
		} else {
			/* we're going out of recovery mode so we'll free
			 * the IRQ allocated specifically for recovery mode
			 * and restore the interrupt scheme
			 */
			free_irq(pf->pdev->irq, pf);
			i40e_clear_interrupt_scheme(pf);
			if (i40e_restore_interrupt_scheme(pf))
				goto end_unlock;
		}

		/* tell the firmware that we're starting */
		i40e_send_version(pf);

		/* bail out in case recovery mode was detected, as there is
		 * no need for further configuration.
		 */
		goto end_unlock;
	}

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (ret)
		goto end_core_reset;

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

	/* Enable FW to write a default DCB config on link-up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */
	/* do basic switch setup */
	if (!lock_acquired)
		rtnl_lock();
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_unlock;

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (vsi->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					vsi->uplink_seid = pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (vsi->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(vsi);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_unlock;
		}
	}

	if (vsi->mqprio_qopt.max_rate[0]) {
		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
		u64 credits = 0;

		do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
		ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
		if (ret)
			goto end_unlock;

		credits = max_tx_rate;
		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&vsi->back->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			max_tx_rate,
			credits,
			vsi->seid);
	}

	ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
	if (ret)
		goto end_unlock;

	/* PF Main VSI is rebuilt by now, go ahead and rebuild channel VSIs
	 * for this main VSI if they exist
	 */
	ret = i40e_rebuild_channels(vsi);
	if (ret)
		goto end_unlock;
	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
#define I40E_REG_MSS		0x000E64DC
#define I40E_REG_MSS_MIN_MASK	0x3FF0000
#define I40E_64BYTE_MSS		0x400000
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

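	/* Worked example (illustrative): I40E_REG_MSS_MIN_MASK covers bits
	 * 16-25, so the minimum MSS is stored as (mss << 16) and
	 * I40E_64BYTE_MSS equals 64 << 16. The clamp above therefore rewrites
	 * any minimum larger than 64 bytes back down to 64, which keeps
	 * small-MSS TSO traffic from tripping the malicious-driver detector.
	 */
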
	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);
	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* Release the RTNL lock before we start resetting VFs */
	if (!lock_acquired)
		rtnl_unlock();

	/* Restore promiscuous settings */
	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
	if (ret)
		dev_warn(&pf->pdev->dev,
			 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
			 pf->cur_promisc ? "on" : "off",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	i40e_reset_all_vfs(pf, true);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* We've already released the lock, so don't do it again */
	goto end_core_reset;

end_unlock:
	if (!lock_acquired)
		rtnl_unlock();
end_core_reset:
	clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
}

/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired)
{
	int ret;
	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_reset(pf);
	if (!ret)
		i40e_rebuild(pf, reinit, lock_acquired);
}

/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
	i40e_prep_for_reset(pf, lock_acquired);
	i40e_reset_and_rebuild(pf, false, lock_acquired);
}

/**
 * i40e_handle_mdd_event
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to identify possibly malicious vfs
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
		}
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}

static const char *i40e_tunnel_name(u8 type)
{
	switch (type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		return "vxlan";
	case UDP_TUNNEL_TYPE_GENEVE:
		return "geneve";
	default:
		return "unknown";
	}
}

/**
 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
 * @pf: board private structure
 **/
static void i40e_sync_udp_filters(struct i40e_pf *pf)
{
	int i;

	/* loop through and set pending bit for all active UDP filters */
	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->udp_ports[i].port)
			pf->pending_udp_bitmap |= BIT_ULL(i);
	}

	set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
}

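/* Illustrative sketch: marking a single slot dirty uses the same BIT_ULL
 * bitmap scheme as the loop above. A hypothetical helper for one port slot,
 * not used by the driver:
 */
static void __maybe_unused i40e_example_mark_udp_port_pending(struct i40e_pf *pf,
							      u8 idx)
{
	/* flag slot idx for the next subtask pass, then arm the sync */
	pf->pending_udp_bitmap |= BIT_ULL(idx);
	set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
}
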
/**
 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 filter_index, type;
	u16 port;
	int i;

	if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
		return;

	/* acquire RTNL to maintain state of flags and port requests */
	rtnl_lock();

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->pending_udp_bitmap & BIT_ULL(i)) {
			struct i40e_udp_port_config *udp_port;
			i40e_status ret = 0;

			udp_port = &pf->udp_ports[i];
			pf->pending_udp_bitmap &= ~BIT_ULL(i);

			port = READ_ONCE(udp_port->port);
			type = READ_ONCE(udp_port->type);
			filter_index = READ_ONCE(udp_port->filter_index);

			/* release RTNL while we wait on AQ command */
			rtnl_unlock();

			if (port)
				ret = i40e_aq_add_udp_tunnel(hw, port,
							     type,
							     &filter_index,
							     NULL);
			else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
				ret = i40e_aq_del_udp_tunnel(hw, filter_index,
							     NULL);

			/* reacquire RTNL so we can update filter_index */
			rtnl_lock();

			if (ret) {
				dev_info(&pf->pdev->dev,
					 "%s %s port %d, index %d failed, err %s aq_err %s\n",
					 i40e_tunnel_name(type),
					 port ? "add" : "delete",
					 port,
					 filter_index,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				if (port) {
					/* failed to add, just reset port,
					 * drop pending bit for any deletion
					 */
					udp_port->port = 0;
					pf->pending_udp_bitmap &= ~BIT_ULL(i);
				}
			} else if (port) {
				/* record filter index on success */
				udp_port->filter_index = filter_index;
			}
		}
	}

	rtnl_unlock();
}

/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
	    test_bit(__I40E_SUSPENDED, pf->state))
		return;

	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
		return;

	if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
		i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
		i40e_sync_filters_subtask(pf);
		i40e_reset_subtask(pf);
		i40e_handle_mdd_event(pf);
		i40e_vc_process_vflr_event(pf);
		i40e_watchdog_subtask(pf);
		i40e_fdir_reinit_subtask(pf);
		if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
			/* Client subtask will reopen next time through. */
			i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
							   true);
		} else {
			i40e_client_subtask(pf);
			if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
					       pf->state))
				i40e_notify_client_of_l2_param_changes(
								pf->vsi[pf->lan_vsi]);
		}
		i40e_sync_filters_subtask(pf);
		i40e_sync_udp_filters_subtask(pf);
	} else {
		i40e_reset_subtask(pf);
	}

	i40e_clean_adminq_subtask(pf);

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		i40e_service_event_schedule(pf);
}

/**
 * i40e_service_timer - timer callback
 * @t: pointer to the timer_list embedded in the PF struct
 **/
static void i40e_service_timer(struct timer_list *t)
{
	struct i40e_pf *pf = from_timer(pf, t, service_timer);

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}

/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
					 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
					 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fdsb_msix;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}

/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: VSI pointer
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	struct i40e_ring **next_rings;
	int size;
	int ret = 0;

	/* allocate memory for both Tx, XDP Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
	       (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
	if (i40e_enabled_xdp_vsi(vsi)) {
		vsi->xdp_rings = next_rings;
		next_rings += vsi->alloc_queue_pairs;
	}
	vsi->rx_rings = next_rings;

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}

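/* Layout note (illustrative): a single allocation backs all three pointer
 * arrays above, partitioned by alloc_queue_pairs:
 *
 *   tx_rings -> [ Tx 0..n-1 ][ XDP Tx 0..n-1 (optional) ][ Rx 0..n-1 ]
 *
 * so rx_rings and xdp_rings are interior pointers into the tx_rings block,
 * and only tx_rings is ever passed to kfree().
 */
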
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_VSI_DOWN, vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->int_rate_limit = 0;
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	hash_init(vsi->mac_filter_hash);
	vsi->irqs_ready = false;

	if (type == I40E_VSI_MAIN) {
		vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
		if (!vsi->af_xdp_zc_qps)
			goto err_rings;
	}

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_hash_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	bitmap_free(vsi->af_xdp_zc_qps);
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

10720 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
10721 * @vsi: VSI pointer
10722 * @free_qvectors: a bool to specify if q_vectors need to be freed.
10724 * On error: returns error code (negative)
10725 * On success: returns 0
10727 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
10729 /* free the ring and vector containers */
10730 if (free_qvectors) {
10731 kfree(vsi->q_vectors);
10732 vsi->q_vectors = NULL;
10734 kfree(vsi->tx_rings);
10735 vsi->tx_rings = NULL;
10736 vsi->rx_rings = NULL;
10737 vsi->xdp_rings = NULL;
10741 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
10742 * and lookup table
10743 * @vsi: Pointer to VSI structure
10745 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
10750 kfree(vsi->rss_hkey_user);
10751 vsi->rss_hkey_user = NULL;
10753 kfree(vsi->rss_lut_user);
10754 vsi->rss_lut_user = NULL;
10758 * i40e_vsi_clear - Deallocate the VSI provided
10759 * @vsi: the VSI being un-configured
10761 static int i40e_vsi_clear(struct i40e_vsi *vsi)
10763 struct i40e_pf *pf;
10772 mutex_lock(&pf->switch_mutex);
10773 if (!pf->vsi[vsi->idx]) {
10774 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
10775 vsi->idx, vsi->idx, vsi->type);
10779 if (pf->vsi[vsi->idx] != vsi) {
10780 dev_err(&pf->pdev->dev,
10781 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
10782 pf->vsi[vsi->idx]->idx,
10783 pf->vsi[vsi->idx]->type,
10784 vsi->idx, vsi->type);
10788 /* updates the PF for this cleared vsi */
10789 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10790 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
10792 bitmap_free(vsi->af_xdp_zc_qps);
10793 i40e_vsi_free_arrays(vsi, true);
10794 i40e_clear_rss_config_user(vsi);
10796 pf->vsi[vsi->idx] = NULL;
10797 if (vsi->idx < pf->next_vsi)
10798 pf->next_vsi = vsi->idx;
10801 mutex_unlock(&pf->switch_mutex);
10809 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
10810 * @vsi: the VSI being cleaned
10812 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
10816 if (vsi->tx_rings && vsi->tx_rings[0]) {
10817 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10818 kfree_rcu(vsi->tx_rings[i], rcu);
10819 vsi->tx_rings[i] = NULL;
10820 vsi->rx_rings[i] = NULL;
10821 if (vsi->xdp_rings)
10822 vsi->xdp_rings[i] = NULL;
10828 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
10829 * @vsi: the VSI being configured
10831 static int i40e_alloc_rings(struct i40e_vsi *vsi)
10833 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
10834 struct i40e_pf *pf = vsi->back;
10835 struct i40e_ring *ring;
10837 /* Set basic values in the rings to be used later during open() */
10838 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10839 /* allocate space for both Tx and Rx in one shot */
10840 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
10844 ring->queue_index = i;
10845 ring->reg_idx = vsi->base_queue + i;
10846 ring->ring_active = false;
10848 ring->netdev = vsi->netdev;
10849 ring->dev = &pf->pdev->dev;
10850 ring->count = vsi->num_tx_desc;
10853 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10854 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10855 ring->itr_setting = pf->tx_itr_default;
10856 vsi->tx_rings[i] = ring++;
10858 if (!i40e_enabled_xdp_vsi(vsi))
10861 ring->queue_index = vsi->alloc_queue_pairs + i;
10862 ring->reg_idx = vsi->base_queue + ring->queue_index;
10863 ring->ring_active = false;
10865 ring->netdev = NULL;
10866 ring->dev = &pf->pdev->dev;
10867 ring->count = vsi->num_tx_desc;
10870 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10871 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10872 set_ring_xdp(ring);
10873 ring->itr_setting = pf->tx_itr_default;
10874 vsi->xdp_rings[i] = ring++;
10877 ring->queue_index = i;
10878 ring->reg_idx = vsi->base_queue + i;
10879 ring->ring_active = false;
10881 ring->netdev = vsi->netdev;
10882 ring->dev = &pf->pdev->dev;
10883 ring->count = vsi->num_rx_desc;
10886 ring->itr_setting = pf->rx_itr_default;
10887 vsi->rx_rings[i] = ring;
10893 i40e_vsi_clear_rings(vsi);
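/* Sketch (not driver code): the loop above allocates 2 or 3 ring structs
 * per queue pair in one kcalloc() and hands them out with 'ring++' in
 * Tx, XDP Tx, Rx order, so all rings of a pair share one allocation and
 * one kfree_rcu() via tx_rings[i].
 */
static void i40e_example_carve_rings(struct i40e_ring *ring, bool has_xdp,
				     struct i40e_ring **tx,
				     struct i40e_ring **xdp,
				     struct i40e_ring **rx)
{
	*tx = ring++;		/* first struct of the trio */
	if (has_xdp)
		*xdp = ring++;	/* optional middle struct */
	*rx = ring;		/* last struct */
}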
10898 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
10899 * @pf: board private structure
10900 * @vectors: the number of MSI-X vectors to request
10902 * Returns the number of vectors reserved, or error
10904 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
10906 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
10907 I40E_MIN_MSIX, vectors);
10909 dev_info(&pf->pdev->dev,
10910 "MSI-X vector reservation failed: %d\n", vectors);
10918 * i40e_init_msix - Setup the MSIX capability
10919 * @pf: board private structure
10921 * Work with the OS to set up the MSIX vectors needed.
10923 * Returns the number of vectors reserved or negative on failure
10925 static int i40e_init_msix(struct i40e_pf *pf)
10927 struct i40e_hw *hw = &pf->hw;
10928 int cpus, extra_vectors;
10932 int iwarp_requested = 0;
10934 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10937 /* The number of vectors we'll request will be comprised of:
10938 * - Add 1 for "other" cause for Admin Queue events, etc.
10939 * - The number of LAN queue pairs
10940 * - Queues being used for RSS.
10941 * We don't need as many as max_rss_size vectors;
10942 * use rss_size instead in the calculation, since that
10943 * is governed by the number of CPUs in the system.
10944 * - assumes symmetric Tx/Rx pairing
10945 * - The number of VMDq pairs
10946 * - The CPU count within the NUMA node if iWARP is enabled
10947 * Once we count this up, try the request.
10949 * If we can't get what we want, we'll simplify to nearly nothing
10950 * and try again. If that still fails, we punt.
10952 vectors_left = hw->func_caps.num_msix_vectors;
10955 /* reserve one vector for miscellaneous handler */
10956 if (vectors_left) {
10961 /* reserve some vectors for the main PF traffic queues. Initially we
10962 * only reserve at most 50% of the available vectors, in the case that
10963 * the number of online CPUs is large. This ensures that we can enable
10964 * extra features as well. Once we've enabled the other features, we
10965 * will use any remaining vectors to reach as close as we can to the
10966 * number of online CPUs.
10968 cpus = num_online_cpus();
10969 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
10970 vectors_left -= pf->num_lan_msix;
10972 /* reserve one vector for sideband flow director */
10973 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10974 if (vectors_left) {
10975 pf->num_fdsb_msix = 1;
10979 pf->num_fdsb_msix = 0;
10983 /* can we reserve enough for iWARP? */
10984 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10985 iwarp_requested = pf->num_iwarp_msix;
10988 pf->num_iwarp_msix = 0;
10989 else if (vectors_left < pf->num_iwarp_msix)
10990 pf->num_iwarp_msix = 1;
10991 v_budget += pf->num_iwarp_msix;
10992 vectors_left -= pf->num_iwarp_msix;
10995 /* any vectors left over go for VMDq support */
10996 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
10997 if (!vectors_left) {
10998 pf->num_vmdq_msix = 0;
10999 pf->num_vmdq_qps = 0;
11001 int vmdq_vecs_wanted =
11002 pf->num_vmdq_vsis * pf->num_vmdq_qps;
11004 min_t(int, vectors_left, vmdq_vecs_wanted);
11006 /* if we're short on vectors for what's desired, we limit
11007 * the queues per vmdq. If this is still more than are
11008 * available, the user will need to change the number of
11009 * queues/vectors used by the PF later with the ethtool
11010 * channels command
11011 */
11012 if (vectors_left < vmdq_vecs_wanted) {
11013 pf->num_vmdq_qps = 1;
11014 vmdq_vecs_wanted = pf->num_vmdq_vsis;
11015 vmdq_vecs = min_t(int,
11019 pf->num_vmdq_msix = pf->num_vmdq_qps;
11021 v_budget += vmdq_vecs;
11022 vectors_left -= vmdq_vecs;
11026 /* On systems with a large number of SMP cores, we previously limited
11027 * the number of vectors for num_lan_msix to be at most 50% of the
11028 * available vectors, to allow for other features. Now, we add back
11029 * the remaining vectors. However, we ensure that the total
11030 * num_lan_msix will not exceed num_online_cpus(). To do this, we
11031 * calculate the number of vectors we can add without going over the
11032 * cap of CPUs. For systems with a small number of CPUs this will be
11035 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
11036 pf->num_lan_msix += extra_vectors;
11037 vectors_left -= extra_vectors;
11039 WARN(vectors_left < 0,
11040 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
11042 v_budget += pf->num_lan_msix;
11043 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
11045 if (!pf->msix_entries)
11048 for (i = 0; i < v_budget; i++)
11049 pf->msix_entries[i].entry = i;
11050 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
11052 if (v_actual < I40E_MIN_MSIX) {
11053 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
11054 kfree(pf->msix_entries);
11055 pf->msix_entries = NULL;
11056 pci_disable_msix(pf->pdev);
11059 } else if (v_actual == I40E_MIN_MSIX) {
11060 /* Adjust for minimal MSIX use */
11061 pf->num_vmdq_vsis = 0;
11062 pf->num_vmdq_qps = 0;
11063 pf->num_lan_qps = 1;
11064 pf->num_lan_msix = 1;
11066 } else if (v_actual != v_budget) {
11067 /* If we have limited resources, we will start with no vectors
11068 * for the special features and then allocate vectors to some
11069 * of these features based on the policy and at the end disable
11070 * the features that did not get any vectors.
11074 dev_info(&pf->pdev->dev,
11075 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
11076 v_actual, v_budget);
11077 /* reserve the misc vector */
11078 vec = v_actual - 1;
11080 /* Scale vector usage down */
11081 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
11082 pf->num_vmdq_vsis = 1;
11083 pf->num_vmdq_qps = 1;
11085 /* partition out the remaining vectors */
11088 pf->num_lan_msix = 1;
11091 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11092 pf->num_lan_msix = 1;
11093 pf->num_iwarp_msix = 1;
11095 pf->num_lan_msix = 2;
11099 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11100 pf->num_iwarp_msix = min_t(int, (vec / 3),
11102 pf->num_vmdq_vsis = min_t(int, (vec / 3),
11103 I40E_DEFAULT_NUM_VMDQ_VSI);
11105 pf->num_vmdq_vsis = min_t(int, (vec / 2),
11106 I40E_DEFAULT_NUM_VMDQ_VSI);
11108 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11109 pf->num_fdsb_msix = 1;
11112 pf->num_lan_msix = min_t(int,
11113 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
11115 pf->num_lan_qps = pf->num_lan_msix;
11120 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
11121 (pf->num_fdsb_msix == 0)) {
11122 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
11123 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11124 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11126 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
11127 (pf->num_vmdq_msix == 0)) {
11128 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
11129 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
11132 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
11133 (pf->num_iwarp_msix == 0)) {
11134 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
11135 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11137 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
11138 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
11140 pf->num_vmdq_msix * pf->num_vmdq_vsis,
11142 pf->num_iwarp_msix);
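/* Worked example (hypothetical numbers, not driver code): with 64 MSI-X
 * vectors and 32 online CPUs, the budgeting above reserves 1 misc vector,
 * caps LAN at min(32, 63 / 2) = 31 initially, carves out FDSB/iWARP/VMDq,
 * then tops LAN back up toward the CPU count from whatever remains.
 */
static int i40e_example_lan_budget(int total_vecs, int cpus, int feature_vecs)
{
	int left = total_vecs - 1;		/* misc vector */
	int lan = min_t(int, cpus, left / 2);	/* initial 50% cap */

	left -= lan + feature_vecs;		/* features take their share */
	lan += min_t(int, cpus - lan, left);	/* top up toward CPU count */
	return lan;
}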
11148 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
11149 * @vsi: the VSI being configured
11150 * @v_idx: index of the vector in the vsi struct
11151 * @cpu: cpu to be used on affinity_mask
11153 * We allocate one q_vector. If allocation fails we return -ENOMEM.
11155 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
11157 struct i40e_q_vector *q_vector;
11159 /* allocate q_vector */
11160 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
11164 q_vector->vsi = vsi;
11165 q_vector->v_idx = v_idx;
11166 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
11169 netif_napi_add(vsi->netdev, &q_vector->napi,
11170 i40e_napi_poll, NAPI_POLL_WEIGHT);
11172 /* tie q_vector and vsi together */
11173 vsi->q_vectors[v_idx] = q_vector;
11179 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
11180 * @vsi: the VSI being configured
11182 * We allocate one q_vector per queue interrupt. If allocation fails we
11183 * return -ENOMEM.
11185 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
11187 struct i40e_pf *pf = vsi->back;
11188 int err, v_idx, num_q_vectors, current_cpu;
11190 /* if not MSIX, give the one vector only to the LAN VSI */
11191 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11192 num_q_vectors = vsi->num_q_vectors;
11193 else if (vsi == pf->vsi[pf->lan_vsi])
11198 current_cpu = cpumask_first(cpu_online_mask);
11200 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
11201 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
11204 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
11205 if (unlikely(current_cpu >= nr_cpu_ids))
11206 current_cpu = cpumask_first(cpu_online_mask);
11213 i40e_free_q_vector(vsi, v_idx);
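/* Affinity-walk note (sketch): cpumask_next() returns nr_cpu_ids once the
 * online mask is exhausted, so the wrap check above restarts the hint at
 * cpumask_first(cpu_online_mask), spreading q_vectors round-robin over
 * the online CPUs.
 */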
11219 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
11220 * @pf: board private structure to initialize
11222 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
11227 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11228 vectors = i40e_init_msix(pf);
11230 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
11231 I40E_FLAG_IWARP_ENABLED |
11232 I40E_FLAG_RSS_ENABLED |
11233 I40E_FLAG_DCB_CAPABLE |
11234 I40E_FLAG_DCB_ENABLED |
11235 I40E_FLAG_SRIOV_ENABLED |
11236 I40E_FLAG_FD_SB_ENABLED |
11237 I40E_FLAG_FD_ATR_ENABLED |
11238 I40E_FLAG_VMDQ_ENABLED);
11239 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11241 /* rework the queue expectations without MSIX */
11242 i40e_determine_queue_usage(pf);
11246 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11247 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
11248 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
11249 vectors = pci_enable_msi(pf->pdev);
11251 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
11253 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
11255 vectors = 1; /* one MSI or Legacy vector */
11258 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
11259 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
11261 /* set up vector assignment tracking */
11262 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
11263 pf->irq_pile = kzalloc(size, GFP_KERNEL);
11267 pf->irq_pile->num_entries = vectors;
11268 pf->irq_pile->search_hint = 0;
11270 /* track first vector for misc interrupts, ignore return */
11271 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
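/* Sizing note (sketch): the irq_pile lump tracker is a header plus one
 * u16 slot per vector, so e.g. 8 vectors cost
 * sizeof(struct i40e_lump_tracking) + 8 * sizeof(u16) bytes, and slot 0
 * is permanently claimed above for the misc interrupt.
 */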
11277 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
11278 * @pf: private board data structure
11280 * Restore the interrupt scheme that was cleared when we suspended the
11281 * device. This should be called during resume to re-allocate the q_vectors
11282 * and reacquire IRQs.
11284 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
11288 /* We cleared the MSI and MSI-X flags when disabling the old interrupt
11289 * scheme. We need to re-enable them here in order to attempt to
11290 * re-acquire the MSI or MSI-X vectors
11292 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
11294 err = i40e_init_interrupt_scheme(pf);
11298 /* Now that we've re-acquired IRQs, we need to remap the vectors and
11299 * rings together again.
11301 for (i = 0; i < pf->num_alloc_vsi; i++) {
11303 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
11306 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
11310 err = i40e_setup_misc_vector(pf);
11314 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
11315 i40e_client_update_msix_info(pf);
11322 i40e_vsi_free_q_vectors(pf->vsi[i]);
11329 * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
11330 * non-queue events in recovery mode
11331 * @pf: board private structure
11333 * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
11334 * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
11335 * This is handled differently from the non-recovery path since no Tx/Rx
11336 * resources are being allocated in recovery mode.
11338 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
11342 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11343 err = i40e_setup_misc_vector(pf);
11346 dev_info(&pf->pdev->dev,
11347 "MSI-X misc vector request failed, error %d\n",
11352 u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
11354 err = request_irq(pf->pdev->irq, i40e_intr, flags,
11358 dev_info(&pf->pdev->dev,
11359 "MSI/legacy misc vector request failed, error %d\n",
11363 i40e_enable_misc_int_causes(pf);
11364 i40e_irq_dynamic_enable_icr0(pf);
11371 * i40e_setup_misc_vector - Setup the misc vector to handle non-queue events
11372 * @pf: board private structure
11374 * This sets up the handler for MSIX 0, which is used to manage the
11375 * non-queue interrupts, e.g. AdminQ and errors. This is not used
11376 * when in MSI or Legacy interrupt mode.
11378 static int i40e_setup_misc_vector(struct i40e_pf *pf)
11380 struct i40e_hw *hw = &pf->hw;
11383 /* Only request the IRQ once, the first time through. */
11384 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
11385 err = request_irq(pf->msix_entries[0].vector,
11386 i40e_intr, 0, pf->int_name, pf);
11388 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
11389 dev_info(&pf->pdev->dev,
11390 "request_irq for %s failed: %d\n",
11391 pf->int_name, err);
11396 i40e_enable_misc_int_causes(pf);
11398 /* associate no queues to the misc vector */
11399 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
11400 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
11404 i40e_irq_dynamic_enable_icr0(pf);
11410 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
11411 * @vsi: Pointer to vsi structure
11412 * @seed: Buffer to store the hash keys
11413 * @lut: Buffer to store the lookup table entries
11414 * @lut_size: Size of buffer to store the lookup table entries
11416 * Return 0 on success, negative on failure
11418 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
11419 u8 *lut, u16 lut_size)
11421 struct i40e_pf *pf = vsi->back;
11422 struct i40e_hw *hw = &pf->hw;
11426 ret = i40e_aq_get_rss_key(hw, vsi->id,
11427 (struct i40e_aqc_get_set_rss_key_data *)seed);
11429 dev_info(&pf->pdev->dev,
11430 "Cannot get RSS key, err %s aq_err %s\n",
11431 i40e_stat_str(&pf->hw, ret),
11432 i40e_aq_str(&pf->hw,
11433 pf->hw.aq.asq_last_status));
11439 bool pf_lut = vsi->type == I40E_VSI_MAIN;
11441 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
11443 dev_info(&pf->pdev->dev,
11444 "Cannot get RSS lut, err %s aq_err %s\n",
11445 i40e_stat_str(&pf->hw, ret),
11446 i40e_aq_str(&pf->hw,
11447 pf->hw.aq.asq_last_status));
11456 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
11457 * @vsi: Pointer to vsi structure
11458 * @seed: RSS hash seed
11459 * @lut: Lookup table
11460 * @lut_size: Lookup table size
11462 * Returns 0 on success, negative on failure
11464 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
11465 const u8 *lut, u16 lut_size)
11467 struct i40e_pf *pf = vsi->back;
11468 struct i40e_hw *hw = &pf->hw;
11469 u16 vf_id = vsi->vf_id;
11472 /* Fill out hash function seed */
11474 u32 *seed_dw = (u32 *)seed;
11476 if (vsi->type == I40E_VSI_MAIN) {
11477 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
11478 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
11479 } else if (vsi->type == I40E_VSI_SRIOV) {
11480 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
11481 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
11483 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
11488 u32 *lut_dw = (u32 *)lut;
11490 if (vsi->type == I40E_VSI_MAIN) {
11491 if (lut_size != I40E_HLUT_ARRAY_SIZE)
11493 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11494 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
11495 } else if (vsi->type == I40E_VSI_SRIOV) {
11496 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
11498 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
11499 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
11501 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
11510 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
11511 * @vsi: Pointer to VSI structure
11512 * @seed: Buffer to store the keys
11513 * @lut: Buffer to store the lookup table entries
11514 * @lut_size: Size of buffer to store the lookup table entries
11516 * Returns 0 on success, negative on failure
11518 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
11519 u8 *lut, u16 lut_size)
11521 struct i40e_pf *pf = vsi->back;
11522 struct i40e_hw *hw = &pf->hw;
11526 u32 *seed_dw = (u32 *)seed;
11528 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
11529 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
11532 u32 *lut_dw = (u32 *)lut;
11534 if (lut_size != I40E_HLUT_ARRAY_SIZE)
11536 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11537 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
11544 * i40e_config_rss - Configure RSS keys and lut
11545 * @vsi: Pointer to VSI structure
11546 * @seed: RSS hash seed
11547 * @lut: Lookup table
11548 * @lut_size: Lookup table size
11550 * Returns 0 on success, negative on failure
11552 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
11554 struct i40e_pf *pf = vsi->back;
11556 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
11557 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
11559 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
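/* Dispatch note (sketch): X722 parts advertise I40E_HW_RSS_AQ_CAPABLE (see
 * i40e_sw_init() below) and program RSS through admin queue commands;
 * parts without that capability fall back to direct register writes via
 * i40e_config_rss_reg().
 */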
11563 * i40e_get_rss - Get RSS keys and lut
11564 * @vsi: Pointer to VSI structure
11565 * @seed: Buffer to store the keys
11566 * @lut: Buffer to store the lookup table entries
11567 * @lut_size: Size of buffer to store the lookup table entries
11569 * Returns 0 on success, negative on failure
11571 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
11573 struct i40e_pf *pf = vsi->back;
11575 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
11576 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
11578 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
11582 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
11583 * @pf: Pointer to board private structure
11584 * @lut: Lookup table
11585 * @rss_table_size: Lookup table size
11586 * @rss_size: Range of queue number for hashing
11588 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
11589 u16 rss_table_size, u16 rss_size)
11593 for (i = 0; i < rss_table_size; i++)
11594 lut[i] = i % rss_size;
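/* Usage sketch (hypothetical values, illustrative only): fill a 64-entry
 * LUT for a 4-queue RSS spread with the helper above; the table then
 * repeats 0,1,2,3 sixteen times.
 */
static void i40e_example_default_lut(struct i40e_pf *pf)
{
	u8 lut[64];	/* assume a 64-entry table for illustration */

	i40e_fill_rss_lut(pf, lut, sizeof(lut), 4);
}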
11598 * i40e_pf_config_rss - Prepare for RSS if used
11599 * @pf: board private structure
11601 static int i40e_pf_config_rss(struct i40e_pf *pf)
11603 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11604 u8 seed[I40E_HKEY_ARRAY_SIZE];
11606 struct i40e_hw *hw = &pf->hw;
11611 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
11612 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
11613 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
11614 hena |= i40e_pf_get_default_rss_hena(pf);
11616 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
11617 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
11619 /* Determine the RSS table size based on the hardware capabilities */
11620 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
11621 reg_val = (pf->rss_table_size == 512) ?
11622 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
11623 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
11624 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
11626 /* Determine the RSS size of the VSI */
11627 if (!vsi->rss_size) {
11629 /* If the firmware does something weird during VSI init, we
11630 * could end up with zero TCs. Check for that to avoid
11631 * divide-by-zero. It probably won't pass traffic, but it also
11632 * won't crash.
11633 */
11634 qcount = vsi->num_queue_pairs /
11635 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
11636 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11638 if (!vsi->rss_size)
11641 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
11645 /* Use user configured lut if there is one, otherwise use default */
11646 if (vsi->rss_lut_user)
11647 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
11649 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
11651 /* Use user configured hash key if there is one, otherwise
11652 * use default.
11653 */
11654 if (vsi->rss_hkey_user)
11655 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
11657 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
11658 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
11665 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
11666 * @pf: board private structure
11667 * @queue_count: the requested queue count for rss.
11669 * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
11670 * count, which may differ from the requested queue count.
11671 * Note: expects to be called while under rtnl_lock()
11673 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
11675 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11678 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
11681 queue_count = min_t(int, queue_count, num_online_cpus());
11682 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
11684 if (queue_count != vsi->num_queue_pairs) {
11687 vsi->req_queue_pairs = queue_count;
11688 i40e_prep_for_reset(pf, true);
11690 pf->alloc_rss_size = new_rss_size;
11692 i40e_reset_and_rebuild(pf, true, true);
11694 /* Discard the user configured hash keys and lut, if less
11695 * queues are enabled.
11697 if (queue_count < vsi->rss_size) {
11698 i40e_clear_rss_config_user(vsi);
11699 dev_dbg(&pf->pdev->dev,
11700 "discard user configured hash keys and lut\n");
11703 /* Reset vsi->rss_size, as number of enabled queues changed */
11704 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
11705 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11707 i40e_pf_config_rss(pf);
11709 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
11710 vsi->req_queue_pairs, pf->rss_size_max);
11711 return pf->alloc_rss_size;
11715 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
11716 * @pf: board private structure
11718 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
11720 i40e_status status;
11721 bool min_valid, max_valid;
11722 u32 max_bw, min_bw;
11724 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
11725 &min_valid, &max_valid);
11729 pf->min_bw = min_bw;
11731 pf->max_bw = max_bw;
11738 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
11739 * @pf: board private structure
11741 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
11743 struct i40e_aqc_configure_partition_bw_data bw_data;
11744 i40e_status status;
11746 /* Set the valid bit for this PF */
11747 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
11748 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
11749 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
11751 /* Set the new bandwidths */
11752 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
11758 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
11759 * @pf: board private structure
11761 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
11763 /* Commit temporary BW setting to permanent NVM image */
11764 enum i40e_admin_queue_err last_aq_status;
11768 if (pf->hw.partition_id != 1) {
11769 dev_info(&pf->pdev->dev,
11770 "Commit BW only works on partition 1! This is partition %d",
11771 pf->hw.partition_id);
11772 ret = I40E_NOT_SUPPORTED;
11773 goto bw_commit_out;
11776 /* Acquire NVM for read access */
11777 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
11778 last_aq_status = pf->hw.aq.asq_last_status;
11780 dev_info(&pf->pdev->dev,
11781 "Cannot acquire NVM for read access, err %s aq_err %s\n",
11782 i40e_stat_str(&pf->hw, ret),
11783 i40e_aq_str(&pf->hw, last_aq_status));
11784 goto bw_commit_out;
11787 /* Read word 0x10 of NVM - SW compatibility word 1 */
11788 ret = i40e_aq_read_nvm(&pf->hw,
11789 I40E_SR_NVM_CONTROL_WORD,
11790 0x10, sizeof(nvm_word), &nvm_word,
11792 /* Save off last admin queue command status before releasing
11793 * the resource
11794 */
11795 last_aq_status = pf->hw.aq.asq_last_status;
11796 i40e_release_nvm(&pf->hw);
11798 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
11799 i40e_stat_str(&pf->hw, ret),
11800 i40e_aq_str(&pf->hw, last_aq_status));
11801 goto bw_commit_out;
11804 /* Wait a bit for NVM release to complete */
11807 /* Acquire NVM for write access */
11808 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
11809 last_aq_status = pf->hw.aq.asq_last_status;
11811 dev_info(&pf->pdev->dev,
11812 "Cannot acquire NVM for write access, err %s aq_err %s\n",
11813 i40e_stat_str(&pf->hw, ret),
11814 i40e_aq_str(&pf->hw, last_aq_status));
11815 goto bw_commit_out;
11817 /* Write it back out unchanged to initiate update NVM,
11818 * which will force a write of the shadow (alt) RAM to
11819 * the NVM - thus storing the bandwidth values permanently.
11821 ret = i40e_aq_update_nvm(&pf->hw,
11822 I40E_SR_NVM_CONTROL_WORD,
11823 0x10, sizeof(nvm_word),
11824 &nvm_word, true, 0, NULL);
11825 /* Save off last admin queue command status before releasing
11826 * the resource
11827 */
11828 last_aq_status = pf->hw.aq.asq_last_status;
11829 i40e_release_nvm(&pf->hw);
11831 dev_info(&pf->pdev->dev,
11832 "BW settings NOT SAVED, err %s aq_err %s\n",
11833 i40e_stat_str(&pf->hw, ret),
11834 i40e_aq_str(&pf->hw, last_aq_status));
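/* Pattern note (sketch): both NVM sections above snapshot
 * pf->hw.aq.asq_last_status *before* i40e_release_nvm(), because the
 * release path can issue its own admin queue traffic and overwrite the
 * status we want to report.
 */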
11841 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
11842 * @pf: board private structure to initialize
11844 * i40e_sw_init initializes the Adapter private data structure.
11845 * Fields are initialized based on PCI device information and
11846 * OS network device settings (MTU size).
11848 static int i40e_sw_init(struct i40e_pf *pf)
11853 /* Set default capability flags */
11854 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
11855 I40E_FLAG_MSI_ENABLED |
11856 I40E_FLAG_MSIX_ENABLED;
11858 /* Set default ITR */
11859 pf->rx_itr_default = I40E_ITR_RX_DEF;
11860 pf->tx_itr_default = I40E_ITR_TX_DEF;
11862 /* Depending on PF configurations, it is possible that the RSS
11863 * maximum might end up larger than the available queues
11865 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
11866 pf->alloc_rss_size = 1;
11867 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
11868 pf->rss_size_max = min_t(int, pf->rss_size_max,
11869 pf->hw.func_caps.num_tx_qp);
11870 if (pf->hw.func_caps.rss) {
11871 pf->flags |= I40E_FLAG_RSS_ENABLED;
11872 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
11873 num_online_cpus());
11876 /* MFP mode enabled */
11877 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
11878 pf->flags |= I40E_FLAG_MFP_ENABLED;
11879 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
11880 if (i40e_get_partition_bw_setting(pf)) {
11881 dev_warn(&pf->pdev->dev,
11882 "Could not get partition bw settings\n");
11884 dev_info(&pf->pdev->dev,
11885 "Partition BW Min = %8.8x, Max = %8.8x\n",
11886 pf->min_bw, pf->max_bw);
11888 /* nudge the Tx scheduler */
11889 i40e_set_partition_bw_setting(pf);
11893 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
11894 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
11895 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
11896 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
11897 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
11898 pf->hw.num_partitions > 1)
11899 dev_info(&pf->pdev->dev,
11900 "Flow Director Sideband mode Disabled in MFP mode\n");
11902 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11903 pf->fdir_pf_filter_count =
11904 pf->hw.func_caps.fd_filters_guaranteed;
11905 pf->hw.fdir_shared_filter_count =
11906 pf->hw.func_caps.fd_filters_best_effort;
11909 if (pf->hw.mac.type == I40E_MAC_X722) {
11910 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
11911 I40E_HW_128_QP_RSS_CAPABLE |
11912 I40E_HW_ATR_EVICT_CAPABLE |
11913 I40E_HW_WB_ON_ITR_CAPABLE |
11914 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
11915 I40E_HW_NO_PCI_LINK_CHECK |
11916 I40E_HW_USE_SET_LLDP_MIB |
11917 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
11918 I40E_HW_PTP_L4_CAPABLE |
11919 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
11920 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
11922 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
11923 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
11924 I40E_FDEVICT_PCTYPE_DEFAULT) {
11925 dev_warn(&pf->pdev->dev,
11926 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
11927 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
11929 } else if ((pf->hw.aq.api_maj_ver > 1) ||
11930 ((pf->hw.aq.api_maj_ver == 1) &&
11931 (pf->hw.aq.api_min_ver > 4))) {
11932 /* Supported in FW API version higher than 1.4 */
11933 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
11936 /* Enable HW ATR eviction if possible */
11937 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
11938 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
11940 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11941 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
11942 (pf->hw.aq.fw_maj_ver < 4))) {
11943 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
11944 /* No DCB support for FW < v4.33 */
11945 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
11948 /* Disable FW LLDP if FW < v4.3 */
11949 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11950 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
11951 (pf->hw.aq.fw_maj_ver < 4)))
11952 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
11954 /* Use the FW Set LLDP MIB API if FW > v4.40 */
11955 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11956 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
11957 (pf->hw.aq.fw_maj_ver >= 5)))
11958 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
11960 /* Enable PTP L4 if FW > v6.0 */
11961 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11962 pf->hw.aq.fw_maj_ver >= 6)
11963 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
11965 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
11966 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
11967 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
11968 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
11971 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
11972 pf->flags |= I40E_FLAG_IWARP_ENABLED;
11973 /* IWARP needs one extra vector for CQP just like MISC. */
11974 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
11976 /* Stopping FW LLDP engine is supported on XL710 and X722
11977 * starting from FW versions determined in i40e_init_adminq.
11978 * Stopping the FW LLDP engine is not supported on XL710
11979 * if NPAR is functioning so unset this hw flag in this case.
11981 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11982 pf->hw.func_caps.npar_enable &&
11983 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
11984 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
11986 #ifdef CONFIG_PCI_IOV
11987 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
11988 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
11989 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
11990 pf->num_req_vfs = min_t(int,
11991 pf->hw.func_caps.num_vfs,
11992 I40E_MAX_VF_COUNT);
11994 #endif /* CONFIG_PCI_IOV */
11995 pf->eeprom_version = 0xDEAD;
11996 pf->lan_veb = I40E_NO_VEB;
11997 pf->lan_vsi = I40E_NO_VSI;
11999 /* By default FW has this off for performance reasons */
12000 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
12002 /* set up queue assignment tracking */
12003 size = sizeof(struct i40e_lump_tracking)
12004 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
12005 pf->qp_pile = kzalloc(size, GFP_KERNEL);
12006 if (!pf->qp_pile) {
12010 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
12011 pf->qp_pile->search_hint = 0;
12013 pf->tx_timeout_recovery_level = 1;
12015 mutex_init(&pf->switch_mutex);
12022 * i40e_set_ntuple - set the ntuple feature flag and take action
12023 * @pf: board private structure to initialize
12024 * @features: the feature set that the stack is suggesting
12026 * returns a bool to indicate if reset needs to happen
12028 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
12030 bool need_reset = false;
12032 /* Check if Flow Director n-tuple support was enabled or disabled. If
12033 * the state changed, we need to reset.
12035 if (features & NETIF_F_NTUPLE) {
12036 /* Enable filters and mark for reset */
12037 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
12039 /* enable FD_SB only if there is an MSI-X vector and no cloud
12040 * filters exist
12041 */
12042 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12043 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12044 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
12047 /* turn off filters, mark for reset and clear SW filter list */
12048 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
12050 i40e_fdir_filter_exit(pf);
12052 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
12053 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12054 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
12056 /* reset fd counters */
12057 pf->fd_add_err = 0;
12058 pf->fd_atr_cnt = 0;
12059 /* if ATR was auto disabled it can be re-enabled. */
12060 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12061 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
12062 (I40E_DEBUG_FD & pf->hw.debug_mask))
12063 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
12069 * i40e_clear_rss_lut - clear the rx hash lookup table
12070 * @vsi: the VSI being configured
12072 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12074 struct i40e_pf *pf = vsi->back;
12075 struct i40e_hw *hw = &pf->hw;
12076 u16 vf_id = vsi->vf_id;
12079 if (vsi->type == I40E_VSI_MAIN) {
12080 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12081 wr32(hw, I40E_PFQF_HLUT(i), 0);
12082 } else if (vsi->type == I40E_VSI_SRIOV) {
12083 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12084 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
12086 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12091 * i40e_set_features - set the netdev feature flags
12092 * @netdev: ptr to the netdev being adjusted
12093 * @features: the feature set that the stack is suggesting
12094 * Note: expects to be called while under rtnl_lock()
12096 static int i40e_set_features(struct net_device *netdev,
12097 netdev_features_t features)
12099 struct i40e_netdev_priv *np = netdev_priv(netdev);
12100 struct i40e_vsi *vsi = np->vsi;
12101 struct i40e_pf *pf = vsi->back;
12104 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12105 i40e_pf_config_rss(pf);
12106 else if (!(features & NETIF_F_RXHASH) &&
12107 netdev->features & NETIF_F_RXHASH)
12108 i40e_clear_rss_lut(vsi);
12110 if (features & NETIF_F_HW_VLAN_CTAG_RX)
12111 i40e_vlan_stripping_enable(vsi);
12113 i40e_vlan_stripping_disable(vsi);
12115 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
12116 dev_err(&pf->pdev->dev,
12117 "Offloaded tc filters active, can't turn hw_tc_offload off");
12121 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12122 i40e_del_all_macvlans(vsi);
12124 need_reset = i40e_set_ntuple(pf, features);
12127 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12133 * i40e_get_udp_port_idx - Look up a possibly offloaded Rx UDP port
12134 * @pf: board private structure
12135 * @port: The UDP port to look up
12137 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
12139 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
12143 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
12144 /* Do not report ports with pending deletions as
12145 * offloaded.
12146 */
12147 if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
12149 if (pf->udp_ports[i].port == port)
12157 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
12158 * @netdev: This physical port's netdev
12159 * @ti: Tunnel endpoint information
12161 static void i40e_udp_tunnel_add(struct net_device *netdev,
12162 struct udp_tunnel_info *ti)
12164 struct i40e_netdev_priv *np = netdev_priv(netdev);
12165 struct i40e_vsi *vsi = np->vsi;
12166 struct i40e_pf *pf = vsi->back;
12167 u16 port = ntohs(ti->port);
12171 idx = i40e_get_udp_port_idx(pf, port);
12173 /* Check if port already exists */
12174 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
12175 netdev_info(netdev, "port %d already offloaded\n", port);
12179 /* Now check if there is space to add the new port */
12180 next_idx = i40e_get_udp_port_idx(pf, 0);
12182 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
12183 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
12188 switch (ti->type) {
12189 case UDP_TUNNEL_TYPE_VXLAN:
12190 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
12192 case UDP_TUNNEL_TYPE_GENEVE:
12193 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
12195 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
12201 /* New port: add it and mark its index in the bitmap */
12202 pf->udp_ports[next_idx].port = port;
12203 pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
12204 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
12205 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
12209 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
12210 * @netdev: This physical port's netdev
12211 * @ti: Tunnel endpoint information
12213 static void i40e_udp_tunnel_del(struct net_device *netdev,
12214 struct udp_tunnel_info *ti)
12216 struct i40e_netdev_priv *np = netdev_priv(netdev);
12217 struct i40e_vsi *vsi = np->vsi;
12218 struct i40e_pf *pf = vsi->back;
12219 u16 port = ntohs(ti->port);
12222 idx = i40e_get_udp_port_idx(pf, port);
12224 /* Check if port already exists */
12225 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
12228 switch (ti->type) {
12229 case UDP_TUNNEL_TYPE_VXLAN:
12230 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
12233 case UDP_TUNNEL_TYPE_GENEVE:
12234 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
12241 /* if port exists, set it to 0 (mark for deletion)
12242 * and make it pending
12244 pf->udp_ports[idx].port = 0;
12246 /* Toggle pending bit instead of setting it. This way if we are
12247 * deleting a port that has yet to be added we just clear the pending
12248 * bit and don't have to worry about it.
12250 pf->pending_udp_bitmap ^= BIT_ULL(idx);
12251 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
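	/* Illustration of the XOR above (not driver code): if the port was
	 * added but never synced, its pending bit is still 1 and the XOR
	 * clears it, undoing the add; if it was already synced, the bit is
	 * 0 and the XOR sets it, queueing a pending delete.
	 */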
12255 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
12259 static int i40e_get_phys_port_id(struct net_device *netdev,
12260 struct netdev_phys_item_id *ppid)
12262 struct i40e_netdev_priv *np = netdev_priv(netdev);
12263 struct i40e_pf *pf = np->vsi->back;
12264 struct i40e_hw *hw = &pf->hw;
12266 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
12267 return -EOPNOTSUPP;
12269 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
12270 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
12276 * i40e_ndo_fdb_add - add an entry to the hardware database
12277 * @ndm: the input from the stack
12278 * @tb: pointer to array of nladdr (unused)
12279 * @dev: the net device pointer
12280 * @addr: the MAC address entry being added
12282 * @flags: instructions from stack about fdb operation
12284 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
12285 struct net_device *dev,
12286 const unsigned char *addr, u16 vid,
12288 struct netlink_ext_ack *extack)
12290 struct i40e_netdev_priv *np = netdev_priv(dev);
12291 struct i40e_pf *pf = np->vsi->back;
12294 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
12295 return -EOPNOTSUPP;
12298 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
12302 /* Hardware does not support aging addresses, so if an
12303 * ndm_state is given, only allow permanent addresses
12305 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
12306 netdev_info(dev, "FDB only supports static addresses\n");
12310 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
12311 err = dev_uc_add_excl(dev, addr);
12312 else if (is_multicast_ether_addr(addr))
12313 err = dev_mc_add_excl(dev, addr);
12317 /* Only return duplicate errors if NLM_F_EXCL is set */
12318 if (err == -EEXIST && !(flags & NLM_F_EXCL))
12325 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
12326 * @dev: the netdev being configured
12327 * @nlh: RTNL message
12328 * @flags: bridge flags
12329 * @extack: netlink extended ack
12331 * Inserts a new hardware bridge if not already created and
12332 * enables the bridging mode requested (VEB or VEPA). If the
12333 * hardware bridge has already been inserted and the request
12334 * is to change the mode then that requires a PF reset to
12335 * allow rebuild of the components with required hardware
12336 * bridge mode enabled.
12338 * Note: expects to be called while under rtnl_lock()
12340 static int i40e_ndo_bridge_setlink(struct net_device *dev,
12341 struct nlmsghdr *nlh,
12343 struct netlink_ext_ack *extack)
12345 struct i40e_netdev_priv *np = netdev_priv(dev);
12346 struct i40e_vsi *vsi = np->vsi;
12347 struct i40e_pf *pf = vsi->back;
12348 struct i40e_veb *veb = NULL;
12349 struct nlattr *attr, *br_spec;
12352 /* Only for PF VSI for now */
12353 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12354 return -EOPNOTSUPP;
12356 /* Find the HW bridge for PF VSI */
12357 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12358 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12362 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12364 nla_for_each_nested(attr, br_spec, rem) {
12367 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12370 mode = nla_get_u16(attr);
12371 if ((mode != BRIDGE_MODE_VEPA) &&
12372 (mode != BRIDGE_MODE_VEB))
12375 /* Insert a new HW bridge */
12377 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12378 vsi->tc_config.enabled_tc);
12380 veb->bridge_mode = mode;
12381 i40e_config_bridge_mode(veb);
12383 /* No Bridge HW offload available */
12387 } else if (mode != veb->bridge_mode) {
12388 /* Existing HW bridge but different mode needs reset */
12389 veb->bridge_mode = mode;
12390 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
12391 if (mode == BRIDGE_MODE_VEB)
12392 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
12394 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12395 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12404 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
12407 * @seq: RTNL message seq #
12408 * @dev: the netdev being configured
12409 * @filter_mask: unused
12410 * @nlflags: netlink flags passed in
12412 * Returns the mode in which the hardware bridge is operating:
12413 * VEB or VEPA mode.
12415 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12416 struct net_device *dev,
12417 u32 __always_unused filter_mask,
12420 struct i40e_netdev_priv *np = netdev_priv(dev);
12421 struct i40e_vsi *vsi = np->vsi;
12422 struct i40e_pf *pf = vsi->back;
12423 struct i40e_veb *veb = NULL;
12426 /* Only for PF VSI for now */
12427 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12428 return -EOPNOTSUPP;
12430 /* Find the HW bridge for the PF VSI */
12431 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12432 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12439 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
12440 0, 0, nlflags, filter_mask, NULL);
12444 * i40e_features_check - Validate encapsulated packet conforms to limits
12446 * @dev: This physical port's netdev
12447 * @features: Offload features that the stack believes apply
12449 static netdev_features_t i40e_features_check(struct sk_buff *skb,
12450 struct net_device *dev,
12451 netdev_features_t features)
12455 /* No point in doing any of this if neither checksum nor GSO are
12456 * being requested for this frame. We can rule out both by just
12457 * checking for CHECKSUM_PARTIAL
12459 if (skb->ip_summed != CHECKSUM_PARTIAL)
12462 /* We cannot support GSO if the MSS is going to be less than
12463 * 64 bytes. If it is then we need to drop support for GSO.
12465 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
12466 features &= ~NETIF_F_GSO_MASK;
12468 /* MACLEN can support at most 63 words */
12469 len = skb_network_header(skb) - skb->data;
12470 if (len & ~(63 * 2))
12473 /* IPLEN and EIPLEN can support at most 127 dwords */
12474 len = skb_transport_header(skb) - skb_network_header(skb);
12475 if (len & ~(127 * 4))
12478 if (skb->encapsulation) {
12479 /* L4TUNLEN can support 127 words */
12480 len = skb_inner_network_header(skb) - skb_transport_header(skb);
12481 if (len & ~(127 * 2))
12484 /* IPLEN can support at most 127 dwords */
12485 len = skb_inner_transport_header(skb) -
12486 skb_inner_network_header(skb);
12487 if (len & ~(127 * 4))
12491 /* No need to validate L4LEN as TCP is the only protocol with
12492 * a flexible value, and we support all possible values supported
12493 * by TCP, which is at most 15 dwords
12498 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
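/* Worked example for the MACLEN check above (sketch): with one VLAN tag,
 * skb_network_header(skb) - skb->data == 18 bytes and 18 & ~(63 * 2) == 0,
 * so the frame passes. An L2 header longer than 126 bytes (or of odd
 * length, since MACLEN counts 2-byte words) fails and loses the offloads.
 */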
12502 * i40e_xdp_setup - add/remove an XDP program
12503 * @vsi: VSI to changed
12504 * @prog: XDP program
12506 static int i40e_xdp_setup(struct i40e_vsi *vsi,
12507 struct bpf_prog *prog)
12509 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
12510 struct i40e_pf *pf = vsi->back;
12511 struct bpf_prog *old_prog;
12515 /* Don't allow frames that span over multiple buffers */
12516 if (frame_size > vsi->rx_buf_len)
12519 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
12522 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
12523 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
12526 i40e_prep_for_reset(pf, true);
12528 old_prog = xchg(&vsi->xdp_prog, prog);
12532 /* Wait until ndo_xsk_wakeup completes. */
12534 i40e_reset_and_rebuild(pf, true, true);
12537 for (i = 0; i < vsi->num_queue_pairs; i++)
12538 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
12541 bpf_prog_put(old_prog);
12543 /* Kick start the NAPI context if there is an AF_XDP socket open
12544 * on that queue id, so that receiving will start.
12546 if (need_reset && prog)
12547 for (i = 0; i < vsi->num_queue_pairs; i++)
12548 if (vsi->xdp_rings[i]->xsk_umem)
12549 (void)i40e_xsk_wakeup(vsi->netdev, i,
12556 * i40e_enter_busy_conf - Enters busy config state
12559 * Returns 0 on success, <0 for failure.
12561 static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
12563 struct i40e_pf *pf = vsi->back;
12566 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
12570 usleep_range(1000, 2000);
12577 * i40e_exit_busy_conf - Exits busy config state
12580 static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
12582 struct i40e_pf *pf = vsi->back;
12584 clear_bit(__I40E_CONFIG_BUSY, pf->state);
12588 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
12590 * @queue_pair: queue pair
12592 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
12594 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
12595 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
12596 memset(&vsi->tx_rings[queue_pair]->stats, 0,
12597 sizeof(vsi->tx_rings[queue_pair]->stats));
12598 if (i40e_enabled_xdp_vsi(vsi)) {
12599 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
12600 sizeof(vsi->xdp_rings[queue_pair]->stats));
12605 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
12607 * @queue_pair: queue pair
12609 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
12611 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
12612 if (i40e_enabled_xdp_vsi(vsi)) {
12613 /* Make sure that in-progress ndo_xdp_xmit calls are
12617 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
12619 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
12623 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
12625 * @queue_pair: queue pair
12626 * @enable: true for enable, false for disable
12628 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
12631 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12632 struct i40e_q_vector *q_vector = rxr->q_vector;
12637 /* All rings in a qp belong to the same qvector. */
12638 if (q_vector->rx.ring || q_vector->tx.ring) {
12640 napi_enable(&q_vector->napi);
12642 napi_disable(&q_vector->napi);
12647 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
12649 * @queue_pair: queue pair
12650 * @enable: true for enable, false for disable
12652 * Returns 0 on success, <0 on failure.
12654 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
12657 struct i40e_pf *pf = vsi->back;
12660 pf_q = vsi->base_queue + queue_pair;
12661 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
12662 false /*is xdp*/, enable);
12664 dev_info(&pf->pdev->dev,
12665 "VSI seid %d Tx ring %d %sable timeout\n",
12666 vsi->seid, pf_q, (enable ? "en" : "dis"));
12670 i40e_control_rx_q(pf, pf_q, enable);
12671 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
12673 dev_info(&pf->pdev->dev,
12674 "VSI seid %d Rx ring %d %sable timeout\n",
12675 vsi->seid, pf_q, (enable ? "en" : "dis"));
12679 /* Due to HW errata, on Rx disable only, the register can
12680 * indicate done before it really is. Needs 50ms to be sure
12685 if (!i40e_enabled_xdp_vsi(vsi))
12688 ret = i40e_control_wait_tx_q(vsi->seid, pf,
12689 pf_q + vsi->alloc_queue_pairs,
12690 true /*is xdp*/, enable);
12692 dev_info(&pf->pdev->dev,
12693 "VSI seid %d XDP Tx ring %d %sable timeout\n",
12694 vsi->seid, pf_q, (enable ? "en" : "dis"));
12701 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
12703 * @queue_pair: queue_pair
12705 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
12707 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12708 struct i40e_pf *pf = vsi->back;
12709 struct i40e_hw *hw = &pf->hw;
12711 /* All rings in a qp belong to the same qvector. */
12712 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
12713 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
12715 i40e_irq_dynamic_enable_icr0(pf);
12721 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
12723 * @queue_pair: queue_pair
12725 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
12727 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12728 struct i40e_pf *pf = vsi->back;
12729 struct i40e_hw *hw = &pf->hw;
12731 /* For simplicity, instead of removing the qp interrupt causes
12732 * from the interrupt linked list, we simply disable the interrupt, and
12733 * leave the list intact.
12735 * All rings in a qp belong to the same qvector.
12737 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
12738 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
12740 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
12742 synchronize_irq(pf->msix_entries[intpf].vector);
12744 /* Legacy and MSI mode - this stops all interrupt handling */
12745 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
12746 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
12748 synchronize_irq(pf->pdev->irq);
12753 * i40e_queue_pair_disable - Disables a queue pair
12755 * @queue_pair: queue pair
12757 * Returns 0 on success, <0 on failure.
12759 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
12763 err = i40e_enter_busy_conf(vsi);
12767 i40e_queue_pair_disable_irq(vsi, queue_pair);
12768 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
12769 i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
12770 i40e_queue_pair_clean_rings(vsi, queue_pair);
12771 i40e_queue_pair_reset_stats(vsi, queue_pair);
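/* Usage sketch (hypothetical caller, not driver code): AF_XDP UMEM setup
 * brackets per-queue reconfiguration with this pair of helpers:
 *
 *	if (if_running)
 *		err = i40e_queue_pair_disable(vsi, qid);
 *	...swap the UMEM / reconfigure rings for qid...
 *	if (if_running)
 *		err = i40e_queue_pair_enable(vsi, qid);
 */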
/**
 * i40e_queue_pair_enable - Enables a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 *
 * Returns 0 on success, <0 on failure.
 **/
int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
{
	int err;

	err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
	if (err)
		return err;

	if (i40e_enabled_xdp_vsi(vsi)) {
		err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
		if (err)
			return err;
	}

	err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
	if (err)
		return err;

	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
	i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
	i40e_queue_pair_enable_irq(vsi, queue_pair);

	i40e_exit_busy_conf(vsi);

	return err;
}
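/* Illustrative call pattern (a sketch, not a helper in this file): the
 * AF_XDP setup path in i40e_xsk.c restarts a single queue pair around a
 * UMEM attach on queue @qid roughly like this, so only the affected queue
 * pair is quiesced instead of the whole VSI:
 *
 *	if (netif_running(vsi->netdev))
 *		err = i40e_queue_pair_disable(vsi, qid);
 *	// ...swap per-queue state...
 *	if (netif_running(vsi->netdev))
 *		err = i40e_queue_pair_enable(vsi, qid);
 */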
/**
 * i40e_xdp - implements ndo_bpf for i40e
 * @dev: netdevice
 * @xdp: XDP command
 **/
static int i40e_xdp(struct net_device *dev,
		    struct netdev_bpf *xdp)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return i40e_xdp_setup(vsi, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
		return 0;
	case XDP_SETUP_XSK_UMEM:
		return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
					   xdp->xsk.queue_id);
	default:
		return -EINVAL;
	}
}
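/* i40e_xdp is exposed through .ndo_bpf in the ops table below. The
 * XDP_SETUP_PROG and XDP_SETUP_XSK_UMEM commands arrive from the core when
 * userspace attaches a program or binds an AF_XDP socket, for example via
 * "ip link set dev <if> xdp obj prog.o" or an xsk bind() call.
 */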
static const struct net_device_ops i40e_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_change_mtu,
	.ndo_do_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_setup_tc		= __i40e_setup_tc,
	.ndo_set_features	= i40e_set_features,
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_get_vf_stats	= i40e_get_vf_stats,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
	.ndo_udp_tunnel_add	= i40e_udp_tunnel_add,
	.ndo_udp_tunnel_del	= i40e_udp_tunnel_del,
	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
	.ndo_fdb_add		= i40e_ndo_fdb_add,
	.ndo_features_check	= i40e_features_check,
	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
	.ndo_bpf		= i40e_xdp,
	.ndo_xdp_xmit		= i40e_xdp_xmit,
	.ndo_xsk_wakeup		= i40e_xsk_wakeup,
	.ndo_dfwd_add_station	= i40e_fwd_add,
	.ndo_dfwd_del_station	= i40e_fwd_del,
};
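/* This ops table is attached to every netdev created by i40e_config_netdev()
 * below (netdev->netdev_ops = &i40e_netdev_ops), so the MAIN and VMDq VSIs
 * share the same entry points.
 */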
/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 broadcast[ETH_ALEN];
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;
	netdev_features_t hw_enc_features;
	netdev_features_t hw_features;

	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	hw_enc_features = NETIF_F_SG			|
			  NETIF_F_IP_CSUM		|
			  NETIF_F_IPV6_CSUM		|
			  NETIF_F_HIGHDMA		|
			  NETIF_F_SOFT_FEATURES		|
			  NETIF_F_TSO			|
			  NETIF_F_TSO_ECN		|
			  NETIF_F_TSO6			|
			  NETIF_F_GSO_GRE		|
			  NETIF_F_GSO_GRE_CSUM		|
			  NETIF_F_GSO_PARTIAL		|
			  NETIF_F_GSO_IPXIP4		|
			  NETIF_F_GSO_IPXIP6		|
			  NETIF_F_GSO_UDP_TUNNEL	|
			  NETIF_F_GSO_UDP_TUNNEL_CSUM	|
			  NETIF_F_GSO_UDP_L4		|
			  NETIF_F_SCTP_CRC		|
			  NETIF_F_RXHASH		|
			  NETIF_F_RXCSUM		|
			  0;

	if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	netdev->hw_enc_features |= hw_enc_features;

	/* record features VLANs can make use of */
	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;

	/* enable macvlan offloads */
	netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;

	hw_features = hw_enc_features		|
		      NETIF_F_HW_VLAN_CTAG_TX	|
		      NETIF_F_HW_VLAN_CTAG_RX;

	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;

	netdev->hw_features |= hw_features;

	netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following steps are necessary for two reasons. First,
		 * some older NVM configurations load a default MAC-VLAN
		 * filter that will accept any tagged packet, and we want to
		 * replace this with a normal filter. Additionally, it is
		 * possible our MAC address was provided by the platform using
		 * Open Firmware or similar.
		 *
		 * Thus, we need to remove the default filter and install one
		 * specific to the MAC address.
		 */
		i40e_rm_default_mac_filter(vsi, mac_addr);
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		i40e_add_mac_filter(vsi, mac_addr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	} else {
		/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
		 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
		 * the end, which is 4 bytes long, so force truncation of the
		 * original name by IFNAMSIZ - 4
		 */
		snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
			 IFNAMSIZ - 4,
			 pf->vsi[pf->lan_vsi]->netdev->name);
		eth_random_addr(mac_addr);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		i40e_add_mac_filter(vsi, mac_addr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Add the broadcast filter so that we initially will receive
	 * broadcast packets. Note that when a new VLAN is first added the
	 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
	 * specific filters as part of transitioning into "vlan" operation.
	 * When more VLANs are added, the driver will copy each existing MAC
	 * filter and add it for the new VLAN.
	 *
	 * Broadcast filters are handled specially by
	 * i40e_sync_filters_subtask, as the driver must set the broadcast
	 * promiscuous bit instead of adding this directly as a MAC/VLAN
	 * filter. The subtask will update the correct broadcast promiscuous
	 * bits as VLANs become active or inactive.
	 */
	eth_broadcast_addr(broadcast);
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_add_mac_filter(vsi, broadcast);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);

	/* i40iw_net_event() reads 16 bytes from neigh->primary_key */
	netdev->neigh_priv_len = sizeof(u32) * 4;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);

	/* MTU range: 68 - 9706 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;

	return 0;
}
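/* The min_mtu/max_mtu bounds advertised above let the core networking stack
 * reject out-of-range MTU requests in dev_set_mtu() before they ever reach
 * i40e_change_mtu(), so the driver callback only sees valid values.
 */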
/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* removing the default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}
/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if the HW bridge mode is VEB and 0 in case of VEPA mode
 **/
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
{
	struct i40e_veb *veb;
	struct i40e_pf *pf = vsi->back;

	/* Uplink is not a bridge so default to VEB */
	if (vsi->veb_idx >= I40E_MAX_VEB)
		return 1;

	veb = pf->veb[vsi->veb_idx];
	if (!veb) {
		dev_info(&pf->pdev->dev,
			 "There is no veb associated with the bridge\n");
		return -ENOENT;
	}

	/* Uplink is a bridge in VEPA mode */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		return 0;

	/* Uplink is a bridge in VEB mode */
	return 1;
}
/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;

	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get PF vsi config, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENOENT;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* Source pruning is enabled by default, so the flag is
		 * negative logic - if it's set, we need to fiddle with
		 * the VSI to disable source pruning.
		 */
		if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
		}

		/* MFP mode setup queue map and update VSI */
		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 * For MFP case the iSCSI PF would use this
			 * flow to enable LAN+iSCSI TC.
			 */
			ret = i40e_vsi_config_tc(vsi, enabled_tc);
			if (ret) {
				/* Single TC condition is not fatal,
				 * message and continue
				 */
				dev_info(&pf->pdev->dev,
					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
					 enabled_tc,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
			}
		}
		break;

	case I40E_VSI_FDIR:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_VMDQ2:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_SRIOV:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
			ctxt.info.queueing_opt_flags |=
				(I40E_AQ_VSI_QUE_OPT_TCP_ENA |
				 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
		}

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
			ctxt.info.sec_flags |=
				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
		}
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_IWARP:
		/* send down message to iWARP */
		break;

	default:
		return -ENODEV;
	}

	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add vsi failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			ret = -ENOENT;
			goto err;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;
		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;
	}

	vsi->active_filters = 0;
	clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	/* If macvlan filters already exist, force them to get loaded */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		f->state = I40E_FILTER_NEW;
		f_count++;
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (f_count) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
	}

	/* Update VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* VSI is already added so not tearing that up */
		ret = 0;
	}

err:
	return ret;
}
/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n, bkt;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}

	uplink_seid = vsi->uplink_seid;
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
			}
		} else {
			i40e_vsi_close(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* clear the sync flag on all filters */
	if (vsi->netdev) {
		__dev_uc_unsync(vsi->netdev, NULL);
		__dev_mc_unsync(vsi->netdev, NULL);
	}

	/* make sure any remaining filters are marked for deletion */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	i40e_sync_vsi_filters(vsi);

	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet. We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++;      /* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++;     /* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}
/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vsi->back;

	if (vsi->q_vectors[0]) {
		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			 vsi->seid);
		return -EEXIST;
	}

	if (vsi->base_vector) {
		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
			 vsi->seid, vsi->base_vector);
		return -EEXIST;
	}

	ret = i40e_vsi_alloc_q_vectors(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
			 vsi->num_q_vectors, vsi->seid, ret);
		vsi->num_q_vectors = 0;
		goto vector_setup_out;
	}

	/* In Legacy mode, we do not have to get any other vector since we
	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return ret;
	if (vsi->num_q_vectors)
		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
						 vsi->num_q_vectors, vsi->idx);
	if (vsi->base_vector < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
		i40e_vsi_free_q_vectors(vsi);
		ret = -ENOENT;
		goto vector_setup_out;
	}

vector_setup_out:
	return ret;
}
/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	u16 alloc_queue_pairs;
	struct i40e_pf *pf;
	u8 enabled_tc;
	int ret;

	if (!vsi)
		return NULL;

	pf = vsi->back;

	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err %d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	if (vsi->type == I40E_VSI_MAIN)
		i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}
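/* Note that the queue lump is released and re-acquired above because the
 * size of the request depends on whether XDP is enabled at the time of the
 * rebuild: with an XDP program attached, each queue pair needs a second Tx
 * queue (alloc_queue_pairs * 2).
 */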
/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then adds the
 * VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	u16 alloc_queue_pairs;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous case
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {
		/* make sure there is such a vsi and uplink */
		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
				veb->bridge_mode = BRIDGE_MODE_VEPA;
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			}
			i40e_config_bridge_mode(veb);
		}
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
	case I40E_VSI_VMDQ2:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
	    (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_vsi_config_rss(vsi);
	}
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}
/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
					le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i * 4)) & 0x7);
	}

out:
	return ret;
}
/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns veb index in PF (positive)
 **/
static int i40e_veb_mem_alloc(struct i40e_pf *pf)
{
	int ret = -ENOENT;
	struct i40e_veb *veb;
	int i;

	/* Need to protect the allocation of switch elements at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VEB list may be fragmented if VEB creation/destruction has
	 * been happening. We can afford to do a quick scan to look
	 * for any free slots in the list.
	 *
	 * find next empty veb slot, looping back around if necessary
	 */
	i = 0;
	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
		i++;
	if (i >= I40E_MAX_VEB) {
		ret = -ENOMEM;
		goto err_alloc_veb;  /* out of VEB slots! */
	}

	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
	if (!veb) {
		ret = -ENOMEM;
		goto err_alloc_veb;
	}
	veb->pf = pf;
	veb->idx = i;
	veb->enabled_tc = 1;

	pf->veb[i] = veb;
	ret = i;
err_alloc_veb:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
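/* As with i40e_vsi_mem_alloc(), the returned slot index doubles as the
 * status value: callers such as i40e_veb_setup() treat a negative return
 * as failure and otherwise use it to address pf->veb[].
 */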
/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}
/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/
static void i40e_veb_clear(struct i40e_veb *veb)
{
	if (!veb)
		return;

	if (veb->pf) {
		struct i40e_pf *pf = veb->pf;

		mutex_lock(&pf->switch_mutex);
		if (pf->veb[veb->idx] == veb)
			pf->veb[veb->idx] = NULL;
		mutex_unlock(&pf->switch_mutex);
	}

	kfree(veb);
}
/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}
/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = veb->pf;
	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
	int ret;

	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, false,
			      &veb->seid, enable_stats, NULL);

	/* get a VEB from the hardware */
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't add VEB, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}
/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch.
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB. It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * Helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb >= I40E_MAX_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb >= I40E_MAX_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}
		if (pf->lan_veb >= I40E_MAX_VEB)
			break;

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}
/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}
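/* The firmware returns the switch configuration in pages: each
 * i40e_aq_get_switch_config() call above fills one I40E_AQ_LARGE_BUF worth
 * of elements and updates next_seid, and the do/while loop keeps asking
 * until next_seid comes back as 0, meaning the last page has been read.
 */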
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: true if the Main VSI needs to be re-initialized
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */

	if ((pf->hw.pf_id == 0) &&
	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		pf->last_sw_conf_flags = flags;
	}

	if (pf->hw.pf_id == 0) {
		u16 valid_flags;

		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
						NULL);
		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
			dev_info(&pf->pdev->dev,
				 "couldn't set switch config bits, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
		}
		pf->last_sw_conf_valid_flags = valid_flags;
	}

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_cloud_filter_exit(pf);
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	/* repopulate tunnel port filters */
	i40e_sync_udp_filters(pf);

	return ret;
}
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;
	int q_max;

	pf->num_lan_qps = 0;

	/* Find the max queues to be put into basic use. We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big RSS set.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_CAPABLE	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_SRIOV_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}

		/* limit lan qps to the smaller of qps, cpus or msix */
		q_max = max_t(int, pf->rss_size_max, num_online_cpus());
		q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
		q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
		pf->num_lan_qps = q_max;

		queues_left -= pf->num_lan_qps;
	}

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
}
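/* Worked example (hypothetical capability numbers): with num_tx_qp = 128,
 * 16 online CPUs, rss_size_max = 64 and plenty of MSI-X vectors, the LAN
 * VSI gets 64 queue pairs, 1 queue is reserved for the Flow Director
 * sideband VSI if FD_SB is on, and the remaining 63 are shared by SR-IOV
 * VFs and VMDq VSIs according to num_vf_qps/num_vmdq_qps.
 */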
/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow director,
 * ethertype and macvlan type filter settings for the pf.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}
#define INFO_STRING_LEN 255
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	char *buf;
	int i;

	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!buf)
		return;

	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
	i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
	i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
		      pf->hw.func_caps.num_vsis,
		      pf->vsi[pf->lan_vsi]->num_queue_pairs);
	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " RSS");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		i += snprintf(&buf[i], REMAIN(i), " FD_SB");
		i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
	}
	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
		i += snprintf(&buf[i], REMAIN(i), " DCB");
	i += snprintf(&buf[i], REMAIN(i), " VxLAN");
	i += snprintf(&buf[i], REMAIN(i), " Geneve");
	if (pf->flags & I40E_FLAG_PTP)
		i += snprintf(&buf[i], REMAIN(i), " PTP");
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " VEB");
	else
		i += snprintf(&buf[i], REMAIN(i), " VEPA");

	dev_info(&pf->pdev->dev, "%s\n", buf);
	kfree(buf);
	WARN_ON(i > INFO_STRING_LEN);
}
/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which will check Open Firmware, or arch
 * specific fallback. Otherwise, we'll default to the stored value in
 * firmware.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}
/**
 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
 * @fec_cfg: FEC option to set in flags
 * @flags: ptr to flags in which we set FEC option
 **/
void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
{
	if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
		*flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
		*flags |= I40E_FLAG_RS_FEC;
		*flags &= ~I40E_FLAG_BASE_R_FEC;
	}
	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
		*flags |= I40E_FLAG_BASE_R_FEC;
		*flags &= ~I40E_FLAG_RS_FEC;
	}
	if (fec_cfg == 0)
		*flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
}
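/* Resulting flag state for each fec_cfg input, as implemented above:
 *   AUTO               -> RS_FEC | BASE_R_FEC (both advertised)
 *   REQUEST/ABILITY_RS -> RS_FEC only
 *   REQUEST/ABILITY_KR -> BASE_R_FEC only
 *   0                  -> neither (FEC off)
 */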
/**
 * i40e_check_recovery_mode - check if we are running transition firmware
 * @pf: board private structure
 *
 * Check registers indicating the firmware runs in recovery mode. Sets the
 * appropriate driver state.
 *
 * Returns true if the recovery mode was detected, false otherwise
 **/
static bool i40e_check_recovery_mode(struct i40e_pf *pf)
{
	u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
	bool is_recovery_mode = false;

	if (pf->hw.mac.type == I40E_MAC_XL710)
		is_recovery_mode =
		val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
		val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
		val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK ||
		val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK;
	if (pf->hw.mac.type == I40E_MAC_X722)
		is_recovery_mode =
		val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
		val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK;
	if (is_recovery_mode) {
		dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
		dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
		set_bit(__I40E_RECOVERY_MODE, pf->state);
		return true;
	}
	if (test_and_clear_bit(__I40E_RECOVERY_MODE, pf->state))
		dev_info(&pf->pdev->dev, "Reinitializing in normal mode with full functionality.\n");

	return false;
}
/**
 * i40e_pf_loop_reset - perform reset in a loop.
 * @pf: board private structure
 *
 * This function is useful when a NIC is about to enter recovery mode.
 * When a NIC's internal data structures are corrupted, its firmware will
 * enter recovery mode.
 * Right after a POR it takes about 7 minutes for firmware to enter
 * recovery mode. Until that time the NIC is in some kind of intermediate
 * state. After that time period the NIC almost surely enters
 * recovery mode. The only way for a driver to detect the intermediate
 * state is to issue a series of pf-resets and check the return values.
 * If a PF reset returns success then the firmware could be in recovery
 * mode, so the caller of this code needs to check for recovery mode
 * if this function returns success. There is a small chance that the
 * firmware will hang in the intermediate state forever.
 * Since waiting 7 minutes is quite a lot of time, this function waits
 * 10 seconds and then gives up by returning an error.
 *
 * Return 0 on success, negative on failure.
 **/
static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
{
	const unsigned short MAX_CNT = 1000;
	const unsigned short MSECS = 10;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = I40E_ERR_NOT_READY;
	u32 cnt;

	for (cnt = 0; cnt < MAX_CNT; ++cnt) {
		ret = i40e_pf_reset(hw);
		if (!ret)
			break;
		msleep(MSECS);
	}

	if (cnt == MAX_CNT) {
		dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
		return ret;
	}

	pf->pfr_count++;
	return ret;
}
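/* With MAX_CNT = 1000 attempts and MSECS = 10 ms between them, the loop
 * above polls for at most roughly 1000 * 10 ms = 10 seconds, which is the
 * bound referred to in the comment block above.
 */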
/**
 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
 * @pf: board private structure
 * @hw: ptr to the hardware info
 *
 * This function does a minimal setup of all subsystems needed for running
 * recovery mode.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
{
	struct i40e_vsi *vsi;
	int err;
	int v_idx;

	pci_save_state(pf->pdev);

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

	/* We allocate one VSI which is needed as absolute minimum
	 * in order to register the netdev
	 */
	v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
	if (v_idx < 0)
		goto err_switch_setup;
	pf->lan_vsi = v_idx;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_switch_setup;
	vsi->alloc_queue_pairs = 1;
	err = i40e_config_netdev(vsi);
	if (err)
		goto err_switch_setup;
	err = register_netdev(vsi->netdev);
	if (err)
		goto err_switch_setup;
	vsi->netdev_registered = true;
	i40e_dbg_pf_init(pf);

	err = i40e_setup_misc_vector_for_recovery_mode(pf);
	if (err)
		goto err_switch_setup;

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;

err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
	i40e_shutdown_adminq(hw);
	iounmap(hw->hw_addr);
	pci_disable_pcie_error_reporting(pf->pdev);
	pci_release_mem_regions(pf->pdev);
	pci_disable_device(pf->pdev);

	return err;
}
14725 * i40e_probe - Device initialization routine
14726 * @pdev: PCI device information struct
14727 * @ent: entry in i40e_pci_tbl
14729 * i40e_probe initializes a PF identified by a pci_dev structure.
14730 * The OS initialization, configuring of the PF private structure,
14731 * and a hardware reset occur.
14733 * Returns 0 on success, negative on failure
14735 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
14737 struct i40e_aq_get_phy_abilities_resp abilities;
14738 struct i40e_pf *pf;
14739 struct i40e_hw *hw;
14740 static u16 pfs_found;
14748 err = pci_enable_device_mem(pdev);
14752 /* set up for high or low dma */
14753 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
14755 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
14757 dev_err(&pdev->dev,
14758 "DMA configuration failed: 0x%x\n", err);
14763 /* set up pci connections */
14764 err = pci_request_mem_regions(pdev, i40e_driver_name);
14766 dev_info(&pdev->dev,
14767 "pci_request_selected_regions failed %d\n", err);
14771 pci_enable_pcie_error_reporting(pdev);
14772 pci_set_master(pdev);
14774 /* Now that we have a PCI connection, we need to do the
14775 * low level device setup. This is primarily setting up
14776 * the Admin Queue structures and then querying for the
14777 * device's current profile information.
14779 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
14786 set_bit(__I40E_DOWN, pf->state);
14791 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
14792 I40E_MAX_CSR_SPACE);
14793 /* We believe that the highest register to read is
14794 * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size
14795 * is not less than that before mapping to prevent a
14798 if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
14799 dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
14804 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
14805 if (!hw->hw_addr) {
14807 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
14808 (unsigned int)pci_resource_start(pdev, 0),
14809 pf->ioremap_len, err);
14812 hw->vendor_id = pdev->vendor;
14813 hw->device_id = pdev->device;
14814 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
14815 hw->subsystem_vendor_id = pdev->subsystem_vendor;
14816 hw->subsystem_device_id = pdev->subsystem_device;
14817 hw->bus.device = PCI_SLOT(pdev->devfn);
14818 hw->bus.func = PCI_FUNC(pdev->devfn);
14819 hw->bus.bus_id = pdev->bus->number;
14820 pf->instance = pfs_found;
14822 /* Select something other than the 802.1ad ethertype for the
14823 * switch to use internally and drop on ingress.
14825 hw->switch_tag = 0xffff;
14826 hw->first_tag = ETH_P_8021AD;
14827 hw->second_tag = ETH_P_8021Q;
14829 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
14830 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
14831 INIT_LIST_HEAD(&pf->ddp_old_prof);
14833 /* set up the locks for the AQ, do this only once in probe
14834 * and destroy them only once in remove
14836 mutex_init(&hw->aq.asq_mutex);
14837 mutex_init(&hw->aq.arq_mutex);
14839 pf->msg_enable = netif_msg_init(debug,
14844 pf->hw.debug_mask = debug;
14846 /* do a special CORER for clearing PXE mode once at init */
14847 if (hw->revision_id == 0 &&
14848 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
14849 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
14854 i40e_clear_pxe_mode(hw);
14857 /* Reset here to make sure all is clean and to define PF 'n' */
14860 err = i40e_set_mac_type(hw);
14862 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
14867 err = i40e_pf_loop_reset(pf);
14869 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
14873 i40e_check_recovery_mode(pf);
14875 hw->aq.num_arq_entries = I40E_AQ_LEN;
14876 hw->aq.num_asq_entries = I40E_AQ_LEN;
14877 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
14878 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
14879 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	err = i40e_init_adminq(hw);
	if (err) {
		if (err == I40E_ERR_FIRMWARE_API_VERSION)
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
				 hw->aq.api_maj_ver,
				 hw->aq.api_min_ver,
				 I40E_FW_API_VERSION_MAJOR,
				 I40E_FW_MINOR_VERSION(hw));
		else
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");

		goto err_pf_reset;
	}
	i40e_get_oem_version(hw);
	/* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
		 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
		 hw->subsystem_vendor_id, hw->subsystem_device_id);
	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
		dev_info(&pdev->dev,
			 "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
			 hw->aq.api_maj_ver,
			 hw->aq.api_min_ver,
			 I40E_FW_API_VERSION_MAJOR,
			 I40E_FW_MINOR_VERSION(hw));
	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
			 hw->aq.api_maj_ver,
			 hw->aq.api_min_ver,
			 I40E_FW_API_VERSION_MAJOR,
			 I40E_FW_MINOR_VERSION(hw));

	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
	i40e_clear_pxe_mode(hw);

	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
		return i40e_init_recovery_mode(pf, hw);

	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}
	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, false, NULL);
	}

	/* allow a platform config to override the HW addr */
	i40e_get_platform_mac_addr(pdev, pf);

	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->hw_features |= I40E_HW_PORT_ID_VALID;
	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);

	dev_info(&pdev->dev,
		 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
			"FW LLDP is disabled\n" :
			"FW LLDP is enabled\n");

	/* Enable FW to write default DCB config on link-up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);
#ifdef CONFIG_I40E_DCB
	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);
	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
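	/* The call above keeps the PM core's view of this port (the device's
	 * power/wakeup attribute in sysfs) consistent with what the NVM
	 * allows for wake-on-LAN.
	 */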
	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}
#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
	err = i40e_setup_pf_switch(pf, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}
	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
	/* Make sure flow control is set according to current settings */
	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_phy_cap\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on set_phy_config\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_link_info\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}
	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}
	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, pf->state);
	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}
#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
						      pf->num_iwarp_msix,
						      I40E_IWARP_IRQ_PILE_ID);
		if (pf->iwarp_base_vector < 0) {
			dev_info(&pdev->dev,
				 "failed to get tracking for %d vectors for IWARP err=%d\n",
				 pf->num_iwarp_msix, pf->iwarp_base_vector);
			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
		}
	}
	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* add this PF to client device list and launch a client service task */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		err = i40e_lan_add_device(pf);
		if (err)
			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
				 err);
	}
#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);
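		/* Decode the enumerated bus speed/width into printable
		 * strings; anything the switches below do not recognize is
		 * simply reported as "Unknown".
		 */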
		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strlcpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strlcpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strlcpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strlcpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}
	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* set the FEC config due to the board capabilities */
	i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);
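	/* A couple of SKUs want extra feature flags: the copper BASE-T parts
	 * drive their LEDs from the PHY rather than the MAC, and the X722
	 * SFP variant carries a CRT retimer.
	 */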
	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;
	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	if (pf->service_timer.function)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);
	if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
		struct i40e_vsi *vsi = pf->vsi[0];

		/* We know that we have allocated only one vsi for this PF,
		 * it was just for registering netdevice, so the interface
		 * could be visible in the 'ifconfig' output
		 */
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);

		goto unmap;
	}

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
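	/* Give back any VF resources before the switch topology below is
	 * dismantled; once the VEBs and VSIs are gone the VFs have nothing
	 * to detach from cleanly.
	 */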
	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);
	i40e_cloud_filter_exit(pf);

	/* remove attached clients */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		ret_code = i40e_lan_del_device(pf);
		if (ret_code)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}
unmap:
	/* Free MSI/legacy interrupt 0 when in recovery mode. */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
		free_irq(pf->pdev->irq, pf);

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
				i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}
	rtnl_unlock();

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the type of PCI error
 *
 * Called to warn that something happened and the error handling steps
 * are in progress.  Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		i40e_prep_for_reset(pf, false);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset.  If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);
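		/* Reading a known register doubles as a liveness probe: the
		 * reset trigger register should read back as zero once the
		 * device has come out of reset cleanly.
		 */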
		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	return result;
}
/**
 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_prep_for_reset(pf, false);
}

/**
 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_reset_and_rebuild(pf, false, false);
}
/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, pf->state))
		return;

	i40e_handle_reset_warning(pf, false);
}
/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 mac_addr[6];
	u16 flags = 0;

	/* Get current MAC address in case it's an LAA */
	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
		ether_addr_copy(mac_addr,
				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
	} else {
		dev_err(&pf->pdev->dev,
			"Failed to retrieve MAC address; using default\n");
		ether_addr_copy(mac_addr, hw->mac.addr);
	}

	/* The FW expects the mac address write cmd to first be called with
	 * one of these flags before calling it again with the multicast
	 * enable flags.
	 */
	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;

	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;

	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
		return;
	}
	flags = I40E_AQC_MC_MAG_EN
			| I40E_AQC_WOL_PRESERVE_ON_PFR
			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed to enable Multicast Magic Packet wake up\n");
}
/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_cloud_filter_exit(pf);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);
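	/* Arm the wake-up registers: APME gates PME assertion for this PF and
	 * WUFC_MAG selects magic packets as the wake filter; both are written
	 * as zero when WoL is disabled.
	 */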
	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Free MSI/legacy interrupt 0 when in recovery mode. */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
		free_irq(pf->pdev->irq, pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * whole section
	 */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	rtnl_unlock();

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
/**
 * i40e_suspend - PM callback for moving to D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_suspend(struct device *dev)
{
	struct i40e_pf *pf = dev_get_drvdata(dev);
	struct i40e_hw *hw = &pf->hw;

	/* If we're already suspended, then there is nothing to do */
	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	set_bit(__I40E_DOWN, pf->state);

	/* Ensure service task will not be running */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * whole section
	 */
	rtnl_lock();

	i40e_prep_for_reset(pf, true);

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Clear the interrupt scheme and release our IRQs so that the system
	 * can safely hibernate even when there are a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	i40e_clear_interrupt_scheme(pf);

	rtnl_unlock();

	return 0;
}
/**
 * i40e_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_resume(struct device *dev)
{
	struct i40e_pf *pf = dev_get_drvdata(dev);
	int err;

	/* If we're not suspended, then there is nothing to do */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
	 * since we're going to be restoring queues
	 */
	rtnl_lock();

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	err = i40e_restore_interrupt_scheme(pf);
	if (err)
		dev_err(dev, "Cannot restore interrupt scheme: %d\n",
			err);

	clear_bit(__I40E_DOWN, pf->state);
	i40e_reset_and_rebuild(pf, false, true);

	rtnl_unlock();

	/* Clear suspended state last after everything is recovered */
	clear_bit(__I40E_SUSPENDED, pf->state);

	/* Restart the service task */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;
}
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.reset_prepare = i40e_pci_error_reset_prepare,
	.reset_done = i40e_pci_error_reset_done,
	.resume = i40e_pci_error_resume,
};

static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
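/* One pci_driver ties the whole lifecycle together: probe/remove, system
 * sleep via the PM ops above (SIMPLE_DEV_PM_OPS wires i40e_suspend and
 * i40e_resume into the suspend/hibernate callbacks), shutdown, AER error
 * recovery, and SR-IOV VF enable/disable from sysfs.
 */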
static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
	.driver = {
		.pm = &i40e_pm_ops,
	},
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set a max task limit.  We must set WQ_MEM_RECLAIM
	 * since we need to be able to guarantee forward progress even under
	 * memory pressure.
	 */
	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);