// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
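
/* ASO (Advanced Steering Operation) infrastructure: a dedicated SQ/CQ pair
 * used to post ACCESS_ASO work requests against firmware objects and poll
 * their completions synchronously. Driver consumers (e.g. flow metering and
 * crypto offloads) drive their ASO objects through this queue.
 */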

#include <linux/mlx5/device.h>
#include <linux/mlx5/transobj.h>
#include "aso.h"
#include "wq.h"

struct mlx5_aso_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq wq;

	/* data path - accessed per napi poll */
	struct mlx5_core_cq mcq;

	/* control */
	struct mlx5_core_dev *mdev;
	struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;

struct mlx5_aso {
	/* data path */
	u16 cc;
	u16 pc;

	struct mlx5_wqe_ctrl_seg *doorbell_cseg;
	struct mlx5_aso_cq cq;

	/* read only */
	struct mlx5_wq_cyc wq;
	void __iomem *uar_map;
	u32 sqn;

	/* control path */
	struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;

static void mlx5_aso_free_cq(struct mlx5_aso_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5_aso_alloc_cq(struct mlx5_core_dev *mdev, int numa_node,
			     void *cqc_data, struct mlx5_aso_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	struct mlx5_wq_param param;
	int err;
	u32 i;

	param.buf_numa_node = numa_node;
	param.db_numa_node = numa_node;

	err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
	if (err)
		return err;

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;

	/* Mark all CQEs as hardware-owned so the poller ignores them until
	 * the device writes a completion.
	 */
	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}

static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
{
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	void *in, *cqc;
	int inlen, eqn;
	int err;

	err = mlx5_vector2eqn(mdev, 0, &eqn);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
	MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));

	kvfree(in);

	return err;
}

static void mlx5_aso_destroy_cq(struct mlx5_aso_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
			      struct mlx5_aso_cq *cq)
{
	void *cqc_data;
	int err;

	cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
	if (!cqc_data)
		return -ENOMEM;

	/* Minimal CQ: log size 1, i.e. two entries */
	MLX5_SET(cqc, cqc_data, log_cq_size, 1);
	MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);

	err = mlx5_aso_alloc_cq(mdev, numa_node, cqc_data, cq);
	if (err) {
		mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
		goto err_out;
	}

	err = create_aso_cq(cq, cqc_data);
	if (err) {
		mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);
		goto err_free_cq;
	}

	kvfree(cqc_data);
	return 0;

err_free_cq:
	mlx5_aso_free_cq(cq);
err_out:
	kvfree(cqc_data);
	return err;
}

static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
			     void *sqc_data, struct mlx5_aso *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5_wq_param param;
	int err;

	sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;

	param.db_numa_node = numa_node;
	param.buf_numa_node = numa_node;
	err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	return 0;
}

static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
			 void *sqc_data, struct mlx5_aso *sq)
{
	void *in, *sqc, *wq;
	int inlen, err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);

	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
				       MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}
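
/* A freshly created SQ is left in the RST state; it must be transitioned
 * to RDY with a MODIFY_SQ command before it will process posted WQEs.
 */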
static int mlx5_aso_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
{
	void *in, *sqc;
	int inlen, err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);

	err = mlx5_core_modify_sq(mdev, sqn, in);

	kvfree(in);

	return err;
}

static int mlx5_aso_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
				  void *sqc_data, struct mlx5_aso *sq)
{
	int err;

	err = create_aso_sq(mdev, pdn, sqc_data, sq);
	if (err)
		return err;

	err = mlx5_aso_set_sq_rdy(mdev, sq->sqn);
	if (err)
		mlx5_core_destroy_sq(mdev, sq->sqn);

	return err;
}

static void mlx5_aso_free_sq(struct mlx5_aso *sq)
{
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5_aso_destroy_sq(struct mlx5_aso *sq)
{
	mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
	mlx5_aso_free_sq(sq);
}

static int mlx5_aso_create_sq(struct mlx5_core_dev *mdev, int numa_node,
			      u32 pdn, struct mlx5_aso *sq)
{
	void *sqc_data, *wq;
	int err;

	sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
	if (!sqc_data)
		return -ENOMEM;

	wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, pdn);
	MLX5_SET(wq, wq, log_wq_sz, 1);

	err = mlx5_aso_alloc_sq(mdev, numa_node, sqc_data, sq);
	if (err) {
		mlx5_core_err(mdev, "Failed to alloc aso wq sq, err=%d\n", err);
		goto err_out;
	}

	err = mlx5_aso_create_sq_rdy(mdev, pdn, sqc_data, sq);
	if (err) {
		mlx5_core_err(mdev, "Failed to open aso wq sq, err=%d\n", err);
		goto err_free_asosq;
	}

	mlx5_core_dbg(mdev, "aso sq->sqn = 0x%x\n", sq->sqn);

	kvfree(sqc_data);
	return 0;

err_free_asosq:
	mlx5_aso_free_sq(sq);
err_out:
	kvfree(sqc_data);
	return err;
}
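
/* Allocate an ASO instance and bring up its CQ and SQ. Returns a valid
 * pointer or an ERR_PTR()-encoded error; release with mlx5_aso_destroy().
 */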
struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn)
{
	int numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
	struct mlx5_aso *aso;
	int err;

	aso = kzalloc(sizeof(*aso), GFP_KERNEL);
	if (!aso)
		return ERR_PTR(-ENOMEM);

	err = mlx5_aso_create_cq(mdev, numa_node, &aso->cq);
	if (err)
		goto err_cq;

	err = mlx5_aso_create_sq(mdev, numa_node, pdn, aso);
	if (err)
		goto err_sq;

	return aso;

err_sq:
	mlx5_aso_destroy_cq(&aso->cq);
err_cq:
	kfree(aso);
	return ERR_PTR(err);
}

void mlx5_aso_destroy(struct mlx5_aso *aso)
{
	if (IS_ERR_OR_NULL(aso))
		return;

	mlx5_aso_destroy_sq(aso);
	mlx5_aso_destroy_cq(&aso->cq);
	kfree(aso);
}
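
/* Data-path helpers. A minimal caller sketch (illustrative only: obj_id,
 * opc_mode and the timeout are placeholders, and real callers also fill
 * the WQE's ASO-specific segments before posting):
 *
 *	struct mlx5_aso_wqe *wqe = mlx5_aso_get_wqe(aso);
 *	u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
 *	int err;
 *
 *	mlx5_aso_build_wqe(aso, ds_cnt, wqe, obj_id, opc_mode);
 *	mlx5_aso_post_wqe(aso, false, &wqe->ctrl);
 *	err = mlx5_aso_poll_cq(aso, false, 10);
 */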

void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
			struct mlx5_aso_wqe *aso_wqe,
			u32 obj_id, u32 opc_mode)
{
	struct mlx5_wqe_ctrl_seg *cseg = &aso_wqe->ctrl;

	cseg->opmod_idx_opcode = cpu_to_be32((opc_mode << MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT) |
					     (aso->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
					     MLX5_OPCODE_ACCESS_ASO);
	cseg->qpn_ds = cpu_to_be32((aso->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
	cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->general_id = cpu_to_be32(obj_id);
}

void *mlx5_aso_get_wqe(struct mlx5_aso *aso)
{
	u16 pi;

	pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
	return mlx5_wq_cyc_get_wqe(&aso->wq, pi);
}

void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
		       struct mlx5_wqe_ctrl_seg *doorbell_cseg)
{
	doorbell_cseg->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	if (with_data)
		aso->pc += MLX5_ASO_WQEBBS_DATA;
	else
		aso->pc += MLX5_ASO_WQEBBS;
	*aso->wq.db = cpu_to_be32(aso->pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)doorbell_cseg, aso->uar_map);

	/* Ensure doorbell is written on uar_page before poll_cq */
	WRITE_ONCE(doorbell_cseg, NULL);
}
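
/* Busy-poll the ASO CQ for the completion of the last posted WQE, waiting
 * up to interval_ms milliseconds before giving up with -ETIMEDOUT.
 */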
int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms)
{
	struct mlx5_aso_cq *cq = &aso->cq;
	struct mlx5_cqe64 *cqe;
	unsigned long expires;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);

	expires = jiffies + msecs_to_jiffies(interval_ms);
	while (!cqe && time_is_after_jiffies(expires)) {
		usleep_range(2, 10);
		cqe = mlx5_cqwq_get_cqe(&cq->wq);
	}

	if (!cqe)
		return -ETIMEDOUT;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	mlx5_cqwq_pop(&cq->wq);

	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
		struct mlx5_err_cqe *err_cqe;

		mlx5_core_err(cq->mdev, "Bad OP in ASOSQ CQE: 0x%x\n",
			      get_cqe_opcode(cqe));

		err_cqe = (struct mlx5_err_cqe *)cqe;
		mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n",
			      err_cqe->vendor_err_synd);
		mlx5_core_err(cq->mdev, "syndrome=%x\n",
			      err_cqe->syndrome);
		print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
			       16, 1, err_cqe,
			       sizeof(*err_cqe), false);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	if (with_data)
		aso->cc += MLX5_ASO_WQEBBS_DATA;
	else
		aso->cc += MLX5_ASO_WQEBBS;

	return 0;
}