/*
 * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf_trace.h>
#include "en/xdp.h"
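
/* XDP_TX fast path: hand an RX buffer straight back out on the XDP SQ.
 * convert_to_xdp_frame() builds the xdp_frame in the headroom of the
 * same page that holds the packet, so no new DMA mapping is needed,
 * only a sync of the payload range for the device.
 */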
static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
		    struct xdp_buff *xdp)
{
	struct mlx5e_xdp_info xdpi;

	xdpi.xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpi.xdpf))
		return false;

	xdpi.dma_addr = di->addr + (xdpi.xdpf->data - (void *)xdpi.xdpf);
	dma_sync_single_for_device(sq->pdev, xdpi.dma_addr,
				   xdpi.xdpf->len, PCI_DMA_TODEVICE);
	xdpi.di = *di;

	return mlx5e_xmit_xdp_frame(sq, &xdpi);
}
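
/* Run the attached XDP program on one RX packet and act on its verdict:
 * XDP_PASS falls through to the regular SKB path (with headroom/length
 * possibly adjusted by the program), XDP_TX bounces the page back out
 * through the XDP SQ, and XDP_REDIRECT hands the frame to another
 * device or socket via xdp_do_redirect().
 */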
/* returns true if packet was consumed by xdp */
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
		      void *va, u16 *rx_headroom, u32 *len)
{
	struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
	struct xdp_buff xdp;
	u32 act;
	int err;

	if (!prog)
		return false;

	xdp.data = va + *rx_headroom;
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = xdp.data + *len;
	xdp.data_hard_start = va;
	xdp.rxq = &rq->xdp_rxq;

	act = bpf_prog_run_xdp(prog, &xdp);
	switch (act) {
	case XDP_PASS:
		*rx_headroom = xdp.data - xdp.data_hard_start;
		*len = xdp.data_end - xdp.data;
		return false;
	case XDP_TX:
		if (unlikely(!mlx5e_xmit_xdp_buff(&rq->xdpsq, di, &xdp)))
			goto xdp_abort;
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
		return true;
	case XDP_REDIRECT:
		/* When XDP enabled then page-refcnt==1 here */
		err = xdp_do_redirect(rq->netdev, &xdp, prog);
		if (unlikely(err))
			goto xdp_abort;
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
		rq->xdpsq.redirect_flush = true;
		mlx5e_page_dma_unmap(rq, di);
		rq->stats->xdp_redirect++;
		return true;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
xdp_abort:
		trace_xdp_exception(rq->netdev, prog, act);
		/* fall through */
	case XDP_DROP:
		rq->stats->xdp_drop++;
		return true;
	}
}
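
/* Build and post a single-packet send WQE on the XDP SQ.  The WQE is a
 * control segment, an eth segment (which may carry MLX5E_XDP_MIN_INLINE
 * inline bytes so the HW sees the start of the packet), and one data
 * segment pointing at the DMA-mapped payload.  The doorbell is only
 * marked pending here; it is rung later in bulk to amortize its cost.
 */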
bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
{
	struct mlx5_wq_cyc       *wq   = &sq->wq;
	u16                       pi   = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg = wqe->data;

	struct xdp_frame *xdpf = xdpi->xdpf;
	dma_addr_t dma_addr  = xdpi->dma_addr;
	unsigned int dma_len = xdpf->len;

	struct mlx5e_xdpsq_stats *stats = sq->stats;

	prefetchw(wqe);

	if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
		stats->err++;
		return false;
	}

	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
		if (sq->doorbell) {
			/* SQ is full, ring doorbell */
			mlx5e_xmit_xdp_doorbell(sq);
			sq->doorbell = false;
		}
		stats->full++;
		return false;
	}

	cseg->fm_ce_se = 0;

	/* copy the inline part if required */
	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
		memcpy(eseg->inline_hdr.start, xdpf->data, MLX5E_XDP_MIN_INLINE);
		eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
		dma_len  -= MLX5E_XDP_MIN_INLINE;
		dma_addr += MLX5E_XDP_MIN_INLINE;
		dseg++;
	}

	/* write the dma part */
	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->byte_count = cpu_to_be32(dma_len);

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);

	/* Move the page reference to the SQ's responsibility and mark it
	 * so it is not put back into the page cache.
	 */
	sq->db.xdpi[pi] = *xdpi;
	sq->pc++;

	sq->doorbell = true;

	stats->xmit++;
	return true;
}
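
/* Reclaim completed XDP SQ descriptors, processing up to
 * MLX5E_TX_CQ_POLL_BUDGET CQEs per call.  On a redirect SQ (no owning
 * RQ) the frame is returned to its allocator and its DMA mapping torn
 * down; on an XDP_TX SQ the RX page is recycled into the RQ page cache.
 */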
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
{
	struct mlx5e_xdpsq *sq;
	struct mlx5_cqe64 *cqe;
	bool is_redirect;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_xdpsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	is_redirect = !rq;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	i = 0;
	do {
		u16 wqe_counter;
		bool last_wqe;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];

			last_wqe = (sqcc == wqe_counter);
			sqcc++;

			if (is_redirect) {
				xdp_return_frame(xdpi->xdpf);
				dma_unmap_single(sq->pdev, xdpi->dma_addr,
						 xdpi->xdpf->len, DMA_TO_DEVICE);
			} else {
				/* Recycle RX page */
				mlx5e_page_release(rq, &xdpi->di, true);
			}
		} while (!last_wqe);
	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	sq->stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->cc = sqcc;
	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
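
/* Drain any descriptors still in flight when the SQ is torn down,
 * releasing frames and pages the same way the CQ poll path does.
 */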
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
{
	bool is_redirect = !rq;

	while (sq->cc != sq->pc) {
		u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
		struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];

		sq->cc++;

		if (is_redirect) {
			xdp_return_frame(xdpi->xdpf);
			dma_unmap_single(sq->pdev, xdpi->dma_addr,
					 xdpi->xdpf->len, DMA_TO_DEVICE);
		} else {
			/* Recycle RX page */
			mlx5e_page_release(rq, &xdpi->di, false);
		}
	}
}
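
/* .ndo_xdp_xmit callback: transmit a batch of redirected frames on the
 * XDP SQ of the channel matching the current CPU.  Returns the number
 * of frames actually sent; frames that cannot be mapped or enqueued
 * are returned to their allocator and counted as drops.
 */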
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		   u32 flags)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_xdpsq *sq;
	int drops = 0;
	int sq_num;
	int i;

	if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	sq_num = smp_processor_id();

	if (unlikely(sq_num >= priv->channels.num))
		return -ENXIO;

	sq = &priv->channels.c[sq_num]->xdpsq;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return -ENETDOWN;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct mlx5e_xdp_info xdpi;

		xdpi.dma_addr = dma_map_single(sq->pdev, xdpf->data, xdpf->len,
					       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, xdpi.dma_addr))) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
			continue;
		}

		xdpi.xdpf = xdpf;

		if (unlikely(!mlx5e_xmit_xdp_frame(sq, &xdpi))) {
			dma_unmap_single(sq->pdev, xdpi.dma_addr,
					 xdpf->len, DMA_TO_DEVICE);
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (flags & XDP_XMIT_FLUSH)
		mlx5e_xmit_xdp_doorbell(sq);

	return n - drops;
}
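
/* Called at the end of an RX NAPI cycle: ring the pending XDP SQ
 * doorbell once for the whole batch, and flush deferred redirects
 * via xdp_do_flush_map() so redirected frames actually leave.
 */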
void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
{
	struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;

	if (xdpsq->doorbell) {
		mlx5e_xmit_xdp_doorbell(xdpsq);
		xdpsq->doorbell = false;
	}

	if (xdpsq->redirect_flush) {
		xdp_do_flush_map();
		xdpsq->redirect_flush = false;
	}
}