// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "setup.h"
#include "en/params.h"
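
/* Setup and teardown of the XSK (AF_XDP zero-copy) queues backing an mlx5e
 * channel: the XSK RQ, a dedicated XSK XDP SQ and the ICOSQ used to trigger
 * IRQs, together with their CQs, plus helpers that redirect the per-channel
 * XSK RQTs between live RQs and the drop RQ.
 */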

bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
			      struct mlx5e_xsk_param *xsk,
			      struct mlx5_core_dev *mdev)
{
	/* AF_XDP doesn't support frames larger than PAGE_SIZE, and the current
	 * mlx5e XDP implementation doesn't support multiple packets per page.
	 */
	if (xsk->chunk_size != PAGE_SIZE)
		return false;

	/* Current MTU and XSK headroom don't allow packets to fit the frames. */
	if (mlx5e_rx_get_linear_frag_sz(params, xsk) > xsk->chunk_size)
		return false;

	/* frag_sz is different for regular and XSK RQs, so ensure that linear
	 * SKB mode is possible.
	 */
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
	default: /* MLX5_WQ_TYPE_CYCLIC */
		return mlx5e_rx_is_linear_skb(params, xsk);
	}
}
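
/* The XSK ICOSQ only carries NOP WQEs used to trigger IRQs, so the caller
 * sizes it with the minimum supported log SQ size.
 */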
static void mlx5e_build_xskicosq_param(struct mlx5e_priv *priv,
				       u8 log_wq_size,
				       struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
}
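
/* Derive the parameters of all XSK queues for one channel from the generic
 * channel parameters and the XSK-specific ones.
 */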
static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk,
				   struct mlx5e_channel_param *cparam)
{
	const u8 xskicosq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, params, xsk, &cparam->rq);
	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
	mlx5e_build_xskicosq_param(priv, xskicosq_size, &cparam->icosq);
	mlx5e_build_rx_cq_param(priv, params, xsk, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, xskicosq_size, &cparam->icosq_cq);
}
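
/* Open all XSK queues for a channel: the RX CQ and RQ, the TX CQ and a
 * dedicated XDP SQ, and finally the ICOSQ with its CQ. Each step unwinds the
 * previous ones on failure, so a failed open leaves no XSK resources behind.
 */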
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
		   struct mlx5e_channel *c)
{
	struct mlx5e_channel_param cparam = {};
	struct dim_cq_moder icocq_moder = {};
	int err;

	if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
		return -EINVAL;

	mlx5e_build_xsk_cparam(priv, params, xsk, &cparam);

	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam.rx_cq, &c->xskrq.cq);
	if (unlikely(err))
		return err;

	err = mlx5e_open_rq(c, params, &cparam.rq, xsk, umem, &c->xskrq);
	if (unlikely(err))
		goto err_close_rx_cq;

	err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam.tx_cq, &c->xsksq.cq);
	if (unlikely(err))
		goto err_close_rq;

	/* Create a separate SQ, so that when the UMEM is disabled, we could
	 * close this SQ safely and stop receiving CQEs. In other case, e.g., if
	 * the XDPSQ was used instead, we might run into trouble when the UMEM
	 * is disabled and then reenabled, but the SQ continues receiving CQEs
	 * from the old UMEM.
	 */
	err = mlx5e_open_xdpsq(c, params, &cparam.xdp_sq, umem, &c->xsksq, true);
	if (unlikely(err))
		goto err_close_tx_cq;

	err = mlx5e_open_cq(c, icocq_moder, &cparam.icosq_cq, &c->xskicosq.cq);
	if (unlikely(err))
		goto err_close_sq;

	/* Create a dedicated SQ for posting NOPs whenever we need an IRQ to be
	 * triggered and NAPI to be called on the correct CPU.
	 */
	err = mlx5e_open_icosq(c, params, &cparam.icosq, &c->xskicosq);
	if (unlikely(err))
		goto err_close_icocq;

	spin_lock_init(&c->xskicosq_lock);

	set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);

	return 0;
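
/* Error unwind: close everything opened so far, in reverse order. */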
err_close_icocq:
	mlx5e_close_cq(&c->xskicosq.cq);

err_close_sq:
	mlx5e_close_xdpsq(&c->xsksq);

err_close_tx_cq:
	mlx5e_close_cq(&c->xsksq.cq);

err_close_rq:
	mlx5e_close_rq(&c->xskrq);

err_close_rx_cq:
	mlx5e_close_cq(&c->xskrq.cq);

	return err;
}
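
/* Teardown mirrors mlx5e_open_xsk(): take the channel out of the XSK state
 * and wait for any in-flight NAPI poll first, then close the queues and CQs.
 */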
void mlx5e_close_xsk(struct mlx5e_channel *c)
{
	clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
	napi_synchronize(&c->napi);

	mlx5e_close_rq(&c->xskrq);
	mlx5e_close_cq(&c->xskrq.cq);
	mlx5e_close_icosq(&c->xskicosq);
	mlx5e_close_cq(&c->xskicosq.cq);
	mlx5e_close_xdpsq(&c->xsksq);
	mlx5e_close_cq(&c->xsksq.cq);
}
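
/* Enabling the RQ alone doesn't start RX: posting a NOP on the ICOSQ raises
 * an IRQ on the channel's CPU, so NAPI runs there and starts serving the
 * newly enabled XSK RQ.
 */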
void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
	/* TX queue is created active. */

	spin_lock(&c->xskicosq_lock);
	mlx5e_trigger_irq(&c->xskicosq);
	spin_unlock(&c->xskicosq_lock);
}

void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{
	mlx5e_deactivate_rq(&c->xskrq);
	/* TX queue is disabled on close. */
}
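
/* Each XSK TIR is fed by a direct, single-entry RQT. Repointing that RQT to
 * either a live XSK RQ or the drop RQ is how XSK traffic is switched on and
 * off per channel.
 */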
static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn)
{
	struct mlx5e_redirect_rqt_param direct_rrp = {
		.is_rss = false,
		{
			.rqn = rqn,
		},
	};

	u32 rqtn = priv->xsk_tir[ix].rqt.rqtn;

	return mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
}

int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
	return mlx5e_redirect_xsk_rqt(priv, c->ix, c->xskrq.rqn);
}

int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix)
{
	return mlx5e_redirect_xsk_rqt(priv, ix, priv->drop_rq.rqn);
}
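
/* Point the RQT of every XSK-enabled channel at that channel's XSK RQ. On
 * failure, the channels already redirected are rolled back to the drop RQ.
 */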
int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	int err, i;

	if (!priv->xsk.refcnt)
		return 0;

	for (i = 0; i < chs->num; i++) {
		struct mlx5e_channel *c = chs->c[i];

		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
			continue;

		err = mlx5e_xsk_redirect_rqt_to_channel(priv, c);
		if (unlikely(err))
			goto err_stop;
	}

	return 0;
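
/* Unwind: channels before index i were already redirected; point them back at
 * the drop RQ.
 */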
err_stop:
	for (i--; i >= 0; i--) {
		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
			continue;

		mlx5e_xsk_redirect_rqt_to_drop(priv, i);
	}

	return err;
}

void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	int i;

	if (!priv->xsk.refcnt)
		return;

	for (i = 0; i < chs->num; i++) {
		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
			continue;

		mlx5e_xsk_redirect_rqt_to_drop(priv, i);
	}
}