// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <net/xdp_sock_drv.h>
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include "ice_sriov.h"
/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM in case there is no space left in the PF
 * queue bitmap
 */
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
	unsigned int offset, i;

	mutex_lock(qs_cfg->qs_mutex);
	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
					    0, qs_cfg->q_count, 0);
	if (offset >= qs_cfg->pf_map_size) {
		mutex_unlock(qs_cfg->qs_mutex);
		return -ENOMEM;
	}

	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
	for (i = 0; i < qs_cfg->q_count; i++)
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
}
/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM in case there is no space left in the PF
 * queue bitmap
 */
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
	unsigned int i, index = 0;

	mutex_lock(qs_cfg->qs_mutex);
	for (i = 0; i < qs_cfg->q_count; i++) {
		index = find_next_zero_bit(qs_cfg->pf_map,
					   qs_cfg->pf_map_size, index);
		if (index >= qs_cfg->pf_map_size)
			goto err_scatter;
		set_bit(index, qs_cfg->pf_map);
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
err_scatter:
	for (index = 0; index < i; index++) {
		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return -ENOMEM;
}
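/* Illustrative example (made-up values, not from the driver): with a PF queue
 * bitmap of 0b111001 (queues 0, 3, 4 and 5 in use) and q_count = 3, the
 * contiguous helper above returns offset 6 and hands queues 6-8 to the VSI,
 * while the scattered fallback would instead pick up the free queues 1, 2
 * and 6 one bit at a time.
 */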
/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
			      QRX_CTRL_QENA_STAT_M))
			return 0;

		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}
/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector and set default value for ITR setting associated
 * with this q_vector. If allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	int err;

	/* allocate q_vector */
	q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
	q_vector->tx.itr_mode = ITR_DYNAMIC;
	q_vector->rx.itr_mode = ITR_DYNAMIC;
	q_vector->tx.type = ICE_TX_CONTAINER;
	q_vector->rx.type = ICE_RX_CONTAINER;
	q_vector->irq.index = -ENOENT;

	if (vsi->type == ICE_VSI_VF) {
		q_vector->reg_idx = ice_calc_vf_reg_idx(vsi->vf, q_vector);
		goto out;
	} else if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
		struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);

		if (ctrl_vsi) {
			if (unlikely(!ctrl_vsi->q_vectors)) {
				err = -ENOENT;
				goto err_free_q_vector;
			}

			q_vector->irq = ctrl_vsi->q_vectors[0]->irq;
			goto skip_alloc;
		}
	}

	q_vector->irq = ice_alloc_irq(pf, vsi->irq_dyn_alloc);
	if (q_vector->irq.index < 0) {
		err = -ENOMEM;
		goto err_free_q_vector;
	}

skip_alloc:
	q_vector->reg_idx = q_vector->irq.index;

	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* This will not be called in the driver load path because the netdev
	 * will not be created yet. All other cases will register the NAPI
	 * handler here (i.e. resume, reset/rebuild, etc.)
	 */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;

err_free_q_vector:
	kfree(q_vector);

	return err;
}
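/* Note on IRQ ownership in ice_vsi_alloc_q_vector() above: a VF VSI never
 * allocates a PF-side MSI-X vector here (only reg_idx is computed), and a
 * VF's control VSI reuses the interrupt of an already existing VF control
 * VSI instead of allocating a new one. ice_free_q_vector() below mirrors
 * this by releasing that shared interrupt only when the last VF control VSI
 * goes away.
 */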
/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
		ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
				   NULL);
		tx_ring->q_vector = NULL;
	}
	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
		ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
				   NULL);
		rx_ring->q_vector = NULL;
	}

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	/* release MSIX interrupt if q_vector had interrupt allocated */
	if (q_vector->irq.index < 0)
		goto free_q_vector;

	/* only free last VF ctrl vsi interrupt */
	if (vsi->type == ICE_VSI_CTRL && vsi->vf &&
	    ice_get_vf_ctrl_vsi(pf, vsi))
		goto free_q_vector;

	ice_free_irq(pf, q_vector->irq);

free_q_vector:
	kfree(q_vector);
	vsi->q_vectors[v_idx] = NULL;
}
/**
 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
 * @hw: board specific structure
 */
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
	u32 regval = rd32(hw, GLINT_CTL);

	/* no need to update global register if ITR gran is already set */
	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
	    (FIELD_GET(GLINT_CTL_ITR_GRAN_200_M, regval) == ICE_ITR_GRAN_US) &&
	    (FIELD_GET(GLINT_CTL_ITR_GRAN_100_M, regval) == ICE_ITR_GRAN_US) &&
	    (FIELD_GET(GLINT_CTL_ITR_GRAN_50_M, regval) == ICE_ITR_GRAN_US) &&
	    (FIELD_GET(GLINT_CTL_ITR_GRAN_25_M, regval) == ICE_ITR_GRAN_US))
		return;

	regval = FIELD_PREP(GLINT_CTL_ITR_GRAN_200_M, ICE_ITR_GRAN_US) |
		 FIELD_PREP(GLINT_CTL_ITR_GRAN_100_M, ICE_ITR_GRAN_US) |
		 FIELD_PREP(GLINT_CTL_ITR_GRAN_50_M, ICE_ITR_GRAN_US) |
		 FIELD_PREP(GLINT_CTL_ITR_GRAN_25_M, ICE_ITR_GRAN_US);
	wr32(hw, GLINT_CTL, regval);
}
/**
 * ice_calc_txq_handle - calculate the queue handle
 * @vsi: VSI that ring belongs to
 * @ring: ring to get the absolute queue index
 * @tc: traffic class number
 */
static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
	WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");

	if (ring->ch)
		return ring->q_index - ring->ch->base_q;

	/* The idea here is that we subtract the queue offset of the TC that
	 * the ring belongs to from its absolute queue index; the result is
	 * the queue's index within that TC.
	 */
	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
}
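/* Example (illustrative numbers): if TC 1 starts at queue offset 8
 * (tc_cfg.tc_info[1].qoffset == 8), the ring with absolute q_index 10 gets
 * handle 2, i.e. it is the third queue within TC 1.
 */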
/**
 * ice_eswitch_calc_txq_handle - calculate the queue handle for a switchdev ring
 * @ring: pointer to the ring whose unique index is needed
 *
 * Because the switchdev VSI serves many netdevs, the ring->q_index values of
 * its Tx rings can repeat. Hardware ring setup requires a unique q_index, so
 * calculate it here by finding this ring's index within vsi->tx_rings.
 *
 * Returns ICE_INVAL_Q_INDEX if the index is not found. This should never
 * happen, because the VSI is taken from ring->vsi, so the ring has to be
 * present in that VSI.
 */
static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
{
	const struct ice_vsi *vsi = ring->vsi;
	int i;

	ice_for_each_txq(vsi, i) {
		if (vsi->tx_rings[i] == ring)
			return i;
	}

	return ICE_INVAL_Q_INDEX;
}
/**
 * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 */
static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
{
	if (!ring->q_vector || !ring->netdev)
		return;

	/* We only initialize XPS once, so as not to overwrite user settings */
	if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
		return;

	netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
			    ring->q_index);
}
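/* The ICE_TX_XPS_INIT_DONE bit means the XPS CPU mask is only programmed the
 * first time the ring is brought up; an administrator can still change it
 * later through /sys/class/net/<netdev>/queues/tx-<n>/xps_cpus without the
 * driver overwriting it on subsequent reconfigurations.
 */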
/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring->dcb_tc);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is VF number between 0-256
	 * for vmvf_type = VM, it is VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
	case ICE_VSI_CTRL:
	case ICE_VSI_PF:
		if (ring->ch)
			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
		else
			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
		break;
	default:
		return;
	}

	/* make sure the context is associated with the right VSI */
	if (ring->ch)
		tlan_ctx->src_vsi = ring->ch->vsi_num;
	else
		tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	/* Restrict Tx timestamps to the PF VSI */
	switch (vsi->type) {
	case ICE_VSI_PF:
		tlan_ctx->tsyn_ena = 1;
		break;
	default:
		break;
	}

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}
/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;

	return 0;
}
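/* Roughly speaking: when build_skb() is in use the hardware writes packet
 * data after a reserved headroom of ICE_SKB_PAD bytes (room for an skb to be
 * built around the buffer); in legacy-rx mode data starts at offset 0 of the
 * page.
 */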
/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	struct ice_hw *hw;
	u16 pf_q;
	int err;

	hw = &vsi->back->hw;

	/* what is Rx queue number in global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

	/* Receive Queue Base Address.
	 * Indicates the starting address of the descriptor queue defined in
	 * 128 Byte units.
	 */
	rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len,
				     BIT_ULL(ICE_RLAN_CTX_DBUF_S));

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to host
	 * memory.
	 */
	rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS);

	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor
	 * and it needs to remain 1 for non-DVM capable configurations to not
	 * break backward compatibility for VF drivers. Setting this field to 0
	 * will cause the single/outer VLAN tag to be stripped to the L2TAG2_2ND
	 * field in the Rx descriptor. Setting it to 1 allows the VLAN tag to
	 * be stripped in L2TAG1 of the Rx descriptor, which is where VFs will
	 * check for the tag.
	 */
	if (ice_is_dvm_ena(hw))
		if (vsi->type == ICE_VSI_VF &&
		    ice_vf_is_port_vlan_ena(vsi->vf))
			rlan_ctx.l2tsel = 1;
		else
			rlan_ctx.l2tsel = 0;
	else
		rlan_ctx.l2tsel = 1;

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether VLAN is stripped from inner headers.
	 * The VLAN in the inner L2 header is stripped to the receive
	 * descriptor if enabled by this flag.
	 */
	rlan_ctx.showiv = 0;

	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
			       ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	/* Enable Flexible Descriptors in the queue context which
	 * allows this driver to select a specific receive descriptor format,
	 * increasing context priority to pick up profile ID; default is 0x01;
	 * setting to 0x03 to ensure profile is programmed if prev context is
	 * of same priority
	 */
	if (vsi->type != ICE_VSI_VF)
		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
	else
		ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
					false);

	/* Absolute queue number out of 2K needs to be passed */
	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* configure Rx buffer alignment */
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		ice_clear_ring_build_skb_ena(ring);
	else
		ice_set_ring_build_skb_ena(ring);

	ring->rx_offset = ice_rx_offset(ring);

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);

	return 0;
}
/**
 * ice_xsk_pool_fill_cb - set up the XSK buffer pool's private context copy
 * @ring: Rx ring whose AF_XDP buffer pool is being configured
 *
 * Tell the XSK buffer pool where, relative to each xdp_buff, the driver keeps
 * its packet context pointer so the pool can pre-populate it for every buffer.
 */
static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
{
	void *ctx_ptr = &ring->pkt_ctx;
	struct xsk_cb_desc desc = {};

	XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff);
	desc.src = &ctx_ptr;
	desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) -
		   sizeof(struct xdp_buff);
	desc.bytes = sizeof(ctx_ptr);
	xsk_pool_fill_cb(ring->xsk_pool, &desc);
}
/**
 * ice_vsi_cfg_rxq - Configure an Rx queue
 * @ring: the ring being configured
 *
 * Return 0 on success and a negative value on error.
 */
static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
	struct device *dev = ice_pf_to_dev(ring->vsi->back);
	u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
	int err;

	ring->rx_buf_len = ring->vsi->rx_buf_len;

	if (ring->vsi->type == ICE_VSI_PF) {
		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
						 ring->q_index,
						 ring->q_vector->napi.napi_id,
						 ring->rx_buf_len);
			if (err)
				return err;
		}

		ring->xsk_pool = ice_xsk_pool(ring);
		if (ring->xsk_pool) {
			xdp_rxq_info_unreg(&ring->xdp_rxq);

			ring->rx_buf_len =
				xsk_pool_get_rx_frame_size(ring->xsk_pool);
			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
						 ring->q_index,
						 ring->q_vector->napi.napi_id,
						 ring->rx_buf_len);
			if (err)
				return err;
			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_XSK_BUFF_POOL,
							 NULL);
			if (err)
				return err;
			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
			ice_xsk_pool_fill_cb(ring);

			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
				 ring->q_index);
		} else {
			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
				err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
							 ring->q_index,
							 ring->q_vector->napi.napi_id,
							 ring->rx_buf_len);
				if (err)
					return err;
			}

			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err)
				return err;
		}
	}

	xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
	ring->xdp.data = NULL;
	ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
	err = ice_setup_rx_ctx(ring);
	if (err) {
		dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
			ring->q_index, err);
		return err;
	}

	if (ring->xsk_pool) {
		bool ok;

		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
				 num_bufs, ring->q_index);
			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");

			return 0;
		}

		ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
		if (!ok) {
			u16 pf_q = ring->vsi->rxq_map[ring->q_index];

			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
				 ring->q_index, pf_q);
		}

		return 0;
	}

	ice_alloc_rx_bufs(ring, num_bufs);

	return 0;
}
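/* ice_vsi_cfg_rxq() above registers one of two memory models with the XDP Rx
 * queue info: MEM_TYPE_XSK_BUFF_POOL when an AF_XDP zero-copy pool is
 * attached to the queue (buffers then come from the pool via
 * ice_alloc_rx_bufs_zc()), and MEM_TYPE_PAGE_SHARED otherwise (driver-owned
 * pages filled by ice_alloc_rx_bufs()).
 */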
int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
{
	if (q_idx >= vsi->num_rxq)
		return -EINVAL;

	return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
}
/**
 * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
 * @vsi: VSI
 */
static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
{
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
		vsi->rx_buf_len = ICE_RXBUF_1664;
#if (PAGE_SIZE < 8192)
	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
#endif
	} else {
		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
		vsi->rx_buf_len = ICE_RXBUF_3072;
	}
}
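/* Summary of the three regimes chosen above: legacy-rx (or no netdev) uses
 * 1664 byte buffers with an ICE_MAX_FRAME_LEGACY_RX max frame; standard-MTU
 * setups on sub-8K pages (when 2K buffers with padding suffice) use
 * 1536 byte buffers minus NET_IP_ALIGN; everything else uses 3072 byte
 * buffers with the firmware maximum frame size.
 */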
/**
 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Rx VSI for operation.
 */
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
	u16 i;

	if (vsi->type == ICE_VSI_VF)
		goto setup_rings;

	ice_vsi_cfg_frame_size(vsi);
setup_rings:
	/* set up individual rings */
	ice_for_each_rxq(vsi, i) {
		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);

		if (err)
			return err;
	}

	return 0;
}
/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * This function first tries to find contiguous space. If it is not successful,
 * it tries with the scatter approach.
 *
 * Return 0 on success and -ENOMEM in case there is no space left in the PF
 * queue bitmap
 */
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
	int ret = 0;

	ret = __ice_vsi_get_qs_contig(qs_cfg);
	if (ret) {
		/* contig failed, so try with scatter approach */
		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
		qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
					qs_cfg->scatter_count);
		ret = __ice_vsi_get_qs_sc(qs_cfg);
	}
	return ret;
}
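/* Callers should note that on the scatter fallback qs_cfg->q_count may have
 * been reduced to qs_cfg->scatter_count, so the number of queues actually
 * assigned must be re-read from qs_cfg after __ice_vsi_get_qs() returns.
 */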
/**
 * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx ring
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 * @wait: wait or don't wait for configuration to finish in hardware
 *
 * Return 0 on success and negative on error.
 */
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 rx_reg;

	rx_reg = rd32(hw, QRX_CTRL(pf_q));

	/* Skip if the queue is already in the requested state */
	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
		return 0;

	/* turn on/off the queue */
	if (ena)
		rx_reg |= QRX_CTRL_QENA_REQ_M;
	else
		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
	wr32(hw, QRX_CTRL(pf_q), rx_reg);

	if (!wait)
		return 0;

	ice_flush(hw);
	return ice_pf_rxq_wait(pf, pf_q, ena);
}
/**
 * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
 * @vsi: the VSI being configured
 * @ena: true/false to verify Rx ring has been enabled/disabled respectively
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 *
 * This routine will wait for the given Rx queue of the VSI to reach the
 * enabled or disabled state. Returns -ETIMEDOUT in case of failing to reach
 * the requested state after multiple retries; else will return 0 in case of
 * success.
 */
int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;

	return ice_pf_rxq_wait(pf, pf_q, ena);
}
/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	u16 v_idx;
	int err;

	if (vsi->q_vectors[0]) {
		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
		return -EEXIST;
	}

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		ice_free_q_vector(vsi, v_idx);

	dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
		vsi->num_q_vectors, vsi->vsi_num, err);
	vsi->num_q_vectors = 0;
	return err;
}
/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
	int q_vectors = vsi->num_q_vectors;
	u16 tx_rings_rem, rx_rings_rem;
	int v_id;

	/* initially assign the remaining ring counts to the VSI's queue counts */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		u8 tx_rings_per_v, rx_rings_per_v;
		u16 q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.tx_ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.tx_ring;
			q_vector->tx.tx_ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.rx_ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.rx_ring;
			q_vector->rx.rx_ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}
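/* Distribution example for ice_vsi_map_rings_to_vectors(): 10 Tx rings spread
 * over 4 vectors yields 3, 3, 2 and 2 rings per vector, since each step maps
 * DIV_ROUND_UP(remaining rings, remaining vectors) rings; Rx rings are spread
 * the same way, independently of Tx.
 */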
/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI having memory freed
 */
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
	int v_idx;

	ice_for_each_q_vector(vsi, v_idx)
		ice_free_q_vector(vsi, v_idx);

	vsi->num_q_vectors = 0;
}
/**
 * ice_vsi_cfg_txq - Configure single Tx queue
 * @vsi: the VSI that queue belongs to
 * @ring: Tx ring to be configured
 * @qg_buf: queue group buffer
 */
static int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
		struct ice_aqc_add_tx_qgrp *qg_buf)
{
	u8 buf_len = struct_size(qg_buf, txqs, 1);
	struct ice_tlan_ctx tlan_ctx = { 0 };
	struct ice_aqc_add_txqs_perq *txq;
	struct ice_channel *ch = ring->ch;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int status;
	u16 pf_q;
	u8 tc;

	/* Configure XPS */
	ice_cfg_xps_tx_ring(ring);

	pf_q = ring->reg_idx;
	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
	/* copy context contents into the qg_buf */
	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
	ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	/* init queue specific tail reg. It is referred to as the
	 * transmit comm scheduler queue doorbell.
	 */
	ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	/* Add unique software queue handle of the Tx queue per
	 * TC into the VSI Tx ring
	 */
	if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
		ring->q_handle = ice_eswitch_calc_txq_handle(ring);

		if (ring->q_handle == ICE_INVAL_Q_INDEX)
			return -ENODEV;
	} else {
		ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
	}

	if (ch)
		status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
					 ring->q_handle, 1, qg_buf, buf_len,
					 NULL);
	else
		status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
					 ring->q_handle, 1, qg_buf, buf_len,
					 NULL);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
			status);
		return status;
	}

	/* Add Tx Queue TEID into the VSI Tx ring from the
	 * response. This will complete configuring and
	 * enabling the queue.
	 */
	txq = &qg_buf->txqs[0];
	if (pf_q == le16_to_cpu(txq->txq_id))
		ring->txq_teid = le32_to_cpu(txq->q_teid);

	return 0;
}
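/* The TEID saved above is the Tx scheduler node ID that firmware assigned to
 * this queue; ice_fill_txq_meta() copies it into the txq_meta that
 * ice_vsi_stop_tx_ring() later hands to ice_dis_vsi_txq() to tear the queue
 * down.
 */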
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
			   u16 q_idx)
{
	DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);

	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
		return -EINVAL;

	qg_buf->num_txqs = 1;

	return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
}
/**
 * ice_vsi_cfg_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 * @rings: Tx ring array to be configured
 * @count: number of Tx ring array elements
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{
	DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
	int err = 0;
	u16 q_idx;

	qg_buf->num_txqs = 1;

	for (q_idx = 0; q_idx < count; q_idx++) {
		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
		if (err)
			break;
	}

	return err;
}
/**
 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
}
/**
 * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx queues dedicated for XDP in given VSI for operation.
 */
int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
{
	int ret;
	int i;

	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
	if (ret)
		return ret;

	ice_for_each_rxq(vsi, i)
		ice_tx_xsk_pool(vsi, i);

	return 0;
}
/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector that's being configured
 *
 * Configure interrupt throttling values for the ring containers that are
 * associated with the interrupt vector passed in.
 */
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	ice_cfg_itr_gran(hw);

	if (q_vector->num_ring_rx)
		ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);

	if (q_vector->num_ring_tx)
		ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);

	ice_write_intrl(q_vector, q_vector->intrl);
}
/**
 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
 * @vsi: the VSI being configured
 * @txq: Tx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = FIELD_PREP(QINT_TQCTL_ITR_INDX_M, itr_idx);

	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
	      FIELD_PREP(QINT_TQCTL_MSIX_INDX_M, msix_idx);

	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
	if (ice_is_xdp_ena_vsi(vsi)) {
		u32 xdp_txq = txq + vsi->num_xdp_txq;

		wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
		     val);
	}
	ice_flush(hw);
}
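/* Note that the XDP Tx queue paired with a given LAN Tx queue is programmed
 * with the same QINT_TQCTL value above, so both share that LAN queue's MSI-X
 * vector and ITR index.
 */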
/**
 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
 * @vsi: the VSI being configured
 * @rxq: Rx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = FIELD_PREP(QINT_RQCTL_ITR_INDX_M, itr_idx);

	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
	      FIELD_PREP(QINT_RQCTL_MSIX_INDX_M, msix_idx);

	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);

	ice_flush(hw);
}
/**
 * ice_trigger_sw_intr - trigger a software interrupt
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector to trigger the software interrupt for
 */
void ice_trigger_sw_intr(struct ice_hw *hw, const struct ice_q_vector *q_vector)
{
	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
	     GLINT_DYN_CTL_SWINT_TRIG_M |
	     GLINT_DYN_CTL_INTENA_M);
}
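/* Writing GLINT_DYN_CTL with SWINT_TRIG and INTENA set fires the vector
 * immediately so its NAPI poll gets scheduled; ICE_ITR_NONE selects the
 * "no ITR" index so the software interrupt is not subject to throttling.
 */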
/**
 * ice_vsi_stop_tx_ring - Disable single Tx ring
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @ring: Tx ring to be stopped
 * @txq_meta: Meta data of Tx ring to be stopped
 */
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		     u16 rel_vmvf_num, struct ice_tx_ring *ring,
		     struct ice_txq_meta *txq_meta)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	struct ice_hw *hw = &pf->hw;
	int status;
	u32 val;

	/* clear cause_ena bit for disabled queues */
	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
	val &= ~QINT_TQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_TQCTL(ring->reg_idx), val);

	/* software is expected to wait for 100 ns */
	ndelay(100);

	/* trigger a software interrupt for the vector
	 * associated to the queue to schedule NAPI handler
	 */
	q_vector = ring->q_vector;
	if (q_vector && !(vsi->vf && ice_is_vf_disabled(vsi->vf)))
		ice_trigger_sw_intr(hw, q_vector);

	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
				 txq_meta->tc, 1, &txq_meta->q_handle,
				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
				 rel_vmvf_num, NULL);

	/* if the disable queue command was exercised during an
	 * active reset flow, -EBUSY is returned.
	 * This is not an error as the reset operation disables
	 * queues at the hardware level anyway.
	 */
	if (status == -EBUSY) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
	} else if (status == -ENOENT) {
		dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
	} else if (status) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
			status);
		return status;
	}

	return 0;
}
/**
 * ice_fill_txq_meta - Prepare the Tx queue's meta data
 * @vsi: VSI that ring belongs to
 * @ring: ring that txq_meta will be based on
 * @txq_meta: a helper struct that wraps Tx queue's information
 *
 * Set up a helper struct that will contain all the necessary fields that
 * are needed for stopping Tx queue
 */
void
ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
		  struct ice_txq_meta *txq_meta)
{
	struct ice_channel *ch = ring->ch;
	u8 tc;

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	txq_meta->q_id = ring->reg_idx;
	txq_meta->q_teid = ring->txq_teid;
	txq_meta->q_handle = ring->q_handle;
	if (ch) {
		txq_meta->vsi_idx = ch->ch_vsi->idx;
		txq_meta->tc = 0;
	} else {
		txq_meta->vsi_idx = vsi->idx;
		txq_meta->tc = tc;
	}
}
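/* Typical teardown usage of the two helpers above (sketch, based on how
 * ice_vsi_stop_tx_rings() in ice_lib.c iterates over a VSI's rings):
 *
 *	struct ice_txq_meta txq_meta = { };
 *
 *	ice_fill_txq_meta(vsi, ring, &txq_meta);
 *	err = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num, ring, &txq_meta);
 */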