/*
 * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf_trace.h>
#include "en/xdp.h"

/* returns true if the packet was consumed by XDP */
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
		      void *va, u16 *rx_headroom, u32 *len)
{
	struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
	struct xdp_buff xdp;
	u32 act;
	int err;

	if (!prog)
		return false;

	xdp.data = va + *rx_headroom;
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = xdp.data + *len;
	xdp.data_hard_start = va;
	xdp.rxq = &rq->xdp_rxq;

	act = bpf_prog_run_xdp(prog, &xdp);
	switch (act) {
	case XDP_PASS:
		*rx_headroom = xdp.data - xdp.data_hard_start;
		*len = xdp.data_end - xdp.data;
		return false;
	case XDP_TX:
		if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
			trace_xdp_exception(rq->netdev, prog, act);
		return true;
	case XDP_REDIRECT:
		/* When XDP is enabled, page-refcnt==1 here */
		err = xdp_do_redirect(rq->netdev, &xdp, prog);
		if (!err) {
			__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
			rq->xdpsq.db.redirect_flush = true;
			mlx5e_page_dma_unmap(rq, di);
		}
		rq->stats->xdp_redirect++;
		return true;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(rq->netdev, prog, act);
		/* fall through */
	case XDP_DROP:
		rq->stats->xdp_drop++;
		return true;
	}
}
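
/* Post a single XDP_TX frame on the RQ's dedicated XDP send queue.
 * Returns false if the frame length is unsuitable (too short to inline
 * or larger than the HW MTU) or if the SQ has no room for another WQE.
 */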
bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
			  const struct xdp_buff *xdp)
{
	struct mlx5e_xdpsq       *sq   = &rq->xdpsq;
	struct mlx5_wq_cyc       *wq   = &sq->wq;
	u16                       pi   = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg;

	ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
	dma_addr_t dma_addr  = di->addr + data_offset;
	unsigned int dma_len = xdp->data_end - xdp->data;

	struct mlx5e_rq_stats *stats = rq->stats;

	prefetchw(wqe);

	if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) {
		stats->xdp_drop++;
		return false;
	}

	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
		if (sq->db.doorbell) {
			/* SQ is full, ring doorbell */
			mlx5e_xmit_xdp_doorbell(sq);
			sq->db.doorbell = false;
		}
		stats->xdp_tx_full++;
		return false;
	}

	dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);

	cseg->fm_ce_se = 0;

	dseg = (struct mlx5_wqe_data_seg *)eseg + 1;

	/* copy the inline part if required */
	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
		memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
		eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
		dma_len  -= MLX5E_XDP_MIN_INLINE;
		dma_addr += MLX5E_XDP_MIN_INLINE;
		dseg++;
	}

	/* write the dma part */
	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->byte_count = cpu_to_be32(dma_len);

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);

	/* move the page reference to the sq's responsibility,
	 * and mark it so it is not put back in the page-cache.
	 */
	__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
	sq->db.di[pi] = *di;
	sq->pc++;

	sq->db.doorbell = true;

	stats->xdp_tx++;
	return true;
}
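
/* Poll completions of the XDP SQ and recycle the pages of completed
 * XDP_TX frames back to the RQ. Returns true if the poll budget was
 * exhausted, i.e. more completions may still be pending.
 */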
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_xdpsq *sq;
	struct mlx5_cqe64 *cqe;
	struct mlx5e_rq *rq;
	u16 sqcc;
	int i = 0;

	sq = container_of(cq, struct mlx5e_xdpsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	rq = container_of(sq, struct mlx5e_rq, xdpsq);

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	do {
		u16 wqe_counter;
		bool last_wqe;

		mlx5_cqwq_pop(&cq->wq);
		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct mlx5e_dma_info *di;
			u16 ci;

			last_wqe = (sqcc == wqe_counter);
			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			di = &sq->db.di[ci];
			sqcc++;

			/* Recycle RX page */
			mlx5e_page_release(rq, di, true);
		} while (!last_wqe);
	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	rq->stats->xdp_tx_cqe += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->cc = sqcc;
	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
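
/* Release the pages of any descriptors still posted on the XDP SQ
 * (e.g. when the queue is being torn down); the pages are freed
 * rather than recycled to the RX page-cache.
 */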
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq);
	struct mlx5e_dma_info *di;
	u16 ci;

	while (sq->cc != sq->pc) {
		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
		di = &sq->db.di[ci];
		sq->cc++;

		mlx5e_page_release(rq, di, false);
	}
}