net: mlx5: Add xdp tx return bulking support
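
Convert the mlx5 XDP TX completion paths to the bulked frame-return API: instead of handing each completed XDP_REDIRECT frame back to its memory allocator individually via xdp_return_frame(), completed frames are queued in an on-stack struct xdp_frame_bulk with xdp_return_frame_bulk() and released in batches by xdp_flush_frame_bulk() once per completion pass. mlx5e_poll_xdpsq_cq() runs in NAPI (softirq) context, which already acts as an RCU read-side critical section, while mlx5e_free_xdpsq_descs() has to take rcu_read_lock() explicitly because xdp_return_frame_bulk() looks up the frame's memory-allocator state under RCU.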
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index ae90d533a3500436e26e88cca495436c9c3fbe16..2e3e78b0f333da928aaea74435bc0d2693354003 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -366,7 +366,8 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
                                  struct mlx5e_xdp_wqe_info *wi,
                                  u32 *xsk_frames,
-                                 bool recycle)
+                                 bool recycle,
+                                 struct xdp_frame_bulk *bq)
 {
        struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
        u16 i;
@@ -379,7 +380,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
                        /* XDP_TX from the XSK RQ and XDP_REDIRECT */
                        dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
                                         xdpi.frame.xdpf->len, DMA_TO_DEVICE);
-                       xdp_return_frame(xdpi.frame.xdpf);
+                       xdp_return_frame_bulk(xdpi.frame.xdpf, bq);
                        break;
                case MLX5E_XDP_XMIT_MODE_PAGE:
                        /* XDP_TX from the regular RQ */
@@ -397,12 +398,15 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
 
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 {
+       struct xdp_frame_bulk bq;
        struct mlx5e_xdpsq *sq;
        struct mlx5_cqe64 *cqe;
        u32 xsk_frames = 0;
        u16 sqcc;
        int i;
 
+       xdp_frame_bulk_init(&bq);
+
        sq = container_of(cq, struct mlx5e_xdpsq, cq);
 
        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
@@ -434,7 +438,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 
                        sqcc += wi->num_wqebbs;
 
-                       mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true);
+                       mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true, &bq);
                } while (!last_wqe);
 
                if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
@@ -447,6 +451,8 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
                }
        } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
+       xdp_flush_frame_bulk(&bq);
+
        if (xsk_frames)
                xsk_tx_completed(sq->xsk_pool, xsk_frames);
 
@@ -463,8 +469,13 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 
 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
 {
+       struct xdp_frame_bulk bq;
        u32 xsk_frames = 0;
 
+       xdp_frame_bulk_init(&bq);
+
+       rcu_read_lock(); /* needed for xdp_return_frame_bulk() */
+
        while (sq->cc != sq->pc) {
                struct mlx5e_xdp_wqe_info *wi;
                u16 ci;
@@ -474,9 +485,12 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
 
                sq->cc += wi->num_wqebbs;
 
-               mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false);
+               mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false, &bq);
        }
 
+       xdp_flush_frame_bulk(&bq);
+       rcu_read_unlock();
+
        if (xsk_frames)
                xsk_tx_completed(sq->xsk_pool, xsk_frames);
 }
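
For reference, the bulking pattern the patch follows boils down to the sequence below. This is a minimal sketch, not part of the patch: my_ring, my_ring_clean() and the flat frames[] array are hypothetical stand-ins for a driver's in-flight bookkeeping, and the DMA unmapping a real completion path performs before returning a frame is omitted. The xdp_frame_bulk_init()/xdp_return_frame_bulk()/xdp_flush_frame_bulk() helpers are the <net/xdp.h> API used in the diff above.

#include <linux/types.h>
#include <linux/rcupdate.h>
#include <net/xdp.h>

struct my_ring {
	struct xdp_frame **frames;	/* in-flight frames, indexed by ci */
	u16 ci;				/* consumer (completion) index */
	u16 mask;			/* ring size - 1, size is a power of two */
};

static void my_ring_clean(struct my_ring *ring, u16 budget)
{
	struct xdp_frame_bulk bq;

	xdp_frame_bulk_init(&bq);	/* empty queue: bq.count = 0, bq.xa = NULL */

	/*
	 * xdp_return_frame_bulk() looks up the frame's memory allocator
	 * under RCU. NAPI poll already runs inside an RCU read-side
	 * section; outside of it the lock must be taken explicitly, as
	 * mlx5e_free_xdpsq_descs() does in the patch.
	 */
	rcu_read_lock();

	while (budget--) {
		struct xdp_frame *xdpf = ring->frames[ring->ci++ & ring->mask];

		/*
		 * Frames are stashed in bq and handed back to the
		 * allocator in batches of up to XDP_BULK_QUEUE_SIZE;
		 * a batch is also flushed early if the next frame
		 * belongs to a different allocator.
		 */
		xdp_return_frame_bulk(xdpf, &bq);
	}

	xdp_flush_frame_bulk(&bq);	/* release anything still pending */
	rcu_read_unlock();
}

Flushing once per completion pass means the underlying allocator (typically a page_pool) is entered once per batch of up to XDP_BULK_QUEUE_SIZE (16) frames rather than once per frame, which is the point of the bulking API.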