// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Hisilicon Limited.
 */

#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_srq *srq;

	xa_lock(&srq_table->xa);
	srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
	if (srq)
		refcount_inc(&srq->refcount);
	xa_unlock(&srq_table->xa);

	if (!srq) {
		dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (refcount_dec_and_test(&srq->refcount))
		complete(&srq->free);
}
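
/* Translate a hns_roce event code into the matching ib_event and deliver
 * it to the consumer's registered event handler, if any.
 */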
static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
				  enum hns_roce_event event_type)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	struct ib_srq *ibsrq = &srq->ibsrq;
	struct ib_event event;

	if (ibsrq->event_handler) {
		event.device = ibsrq->device;
		event.element.srq = ibsrq;
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			dev_err(hr_dev->dev,
				"hns_roce: unexpected event type 0x%x on SRQ %06lx\n",
				event_type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

static int hns_roce_hw_create_srq(struct hns_roce_dev *dev,
				  struct hns_roce_cmd_mailbox *mailbox,
				  unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
				 HNS_ROCE_CMD_CREATE_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
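
/* When no mailbox is supplied, the out-parameter is 0 and the mailbox
 * operation modifier is set to 1; presumably the firmware then destroys
 * the SRQ without writing its context back.
 */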
static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
				   struct hns_roce_cmd_mailbox *mailbox,
				   unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
				 mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
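
/* Allocate an SRQ number, reserve its context in the SRQC table, publish
 * the SRQ in the xarray so async events can find it, then write the
 * context to hardware through a command mailbox, unwinding on failure.
 */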
static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_ida *srq_ida = &hr_dev->srq_table.srq_ida;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;
	int id;

	id = ida_alloc_range(&srq_ida->ida, srq_ida->min, srq_ida->max,
			     GFP_KERNEL);
	if (id < 0) {
		ibdev_err(ibdev, "failed to alloc srq(%d).\n", id);
		return -ENOMEM;
	}
	srq->srqn = (unsigned long)id;

	ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
	if (ret) {
		ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret);
		goto err_out;
	}

	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
	if (ret) {
		ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
		goto err_put;
	}

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR_OR_NULL(mailbox)) {
		ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n");
		ret = -ENOMEM;
		goto err_xa;
	}

	ret = hr_dev->hw->write_srqc(srq, mailbox->buf);
	if (ret) {
		ibdev_err(ibdev, "failed to write SRQC.\n");
		goto err_mbox;
	}

	ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
	if (ret) {
		ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret);
		goto err_mbox;
	}

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
err_xa:
	xa_erase(&srq_table->xa, srq->srqn);
err_put:
	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
err_out:
	ida_free(&srq_ida->ida, id);

	return ret;
}
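
/* Destroy the hardware SRQ context, then wait for all outstanding
 * references (e.g. from event handlers) to be dropped before releasing
 * the SRQC table entry and the SRQ number.
 */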
static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	int ret;

	ret = hns_roce_hw_destroy_srq(hr_dev, NULL, srq->srqn);
	if (ret)
		dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
			ret, srq->srqn);

	xa_erase(&srq_table->xa, srq->srqn);

	if (refcount_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
	ida_free(&srq_table->srq_ida.ida, (int)srq->srqn);
}
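
/* Create the index queue that hardware uses to locate WQEs in the SRQ
 * buffer. Kernel SRQs additionally get a bitmap for tracking which
 * indexes are in use.
 */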
static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
			 struct ib_udata *udata, unsigned long addr)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);

	buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
					srq->idx_que.entry_shift);
	buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
	buf_attr.region_count = 1;

	ret = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
				  hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT,
				  udata, addr);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ idx mtr, ret = %d.\n", ret);
		return ret;
	}

	if (!udata) {
		idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
		if (!idx_que->bitmap) {
			ibdev_err(ibdev, "failed to alloc SRQ idx bitmap.\n");
			ret = -ENOMEM;
			goto err_idx_mtr;
		}
	}

	idx_que->head = 0;
	idx_que->tail = 0;

	return 0;

err_idx_mtr:
	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);

	return ret;
}

static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;

	bitmap_free(idx_que->bitmap);
	idx_que->bitmap = NULL;
	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
}
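
/* Allocate and map the WQE buffer; each WQE is sized to hold max_gs
 * scatter/gather entries, rounded up to a power of two.
 */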
static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
			     struct hns_roce_srq *srq,
			     struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
						      HNS_ROCE_SGE_SIZE *
						      srq->max_gs)));

	buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
							 srq->wqe_shift);
	buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
	buf_attr.region_count = 1;

	ret = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
				  hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT,
				  udata, addr);
	if (ret)
		ibdev_err(ibdev,
			  "failed to alloc SRQ buf mtr, ret = %d.\n", ret);

	return ret;
}

static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev,
			     struct hns_roce_srq *srq)
{
	hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
}

static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	return 0;
}

static void free_srq_wrid(struct hns_roce_srq *srq)
{
	kvfree(srq->wrid);
	srq->wrid = NULL;
}

static u32 proc_srq_sge(struct hns_roce_dev *dev, struct hns_roce_srq *hr_srq,
			bool user)
{
	u32 max_sge = dev->caps.max_srq_sges;

	if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return max_sge;

	/* Reserve SGEs only for HIP08 in kernel; the userspace driver already
	 * accounts for the reserved SGEs in max_sge when allocating the WQE
	 * buffer, so there is no need to do so again here. But the resulting
	 * number may exceed the SGE capacity recorded in the firmware, so the
	 * kernel driver adapts the value accordingly.
	 */
	if (user)
		max_sge = roundup_pow_of_two(max_sge + 1);
	else
		hr_srq->rsv_sge = 1;

	return max_sge;
}
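
/* Validate the requested queue depth and SGE count against the device
 * caps, round both up to the powers of two the hardware requires, and
 * report the resulting values back through the attr struct.
 */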
static int set_srq_basic_param(struct hns_roce_srq *srq,
			       struct ib_srq_init_attr *init_attr,
			       struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	struct ib_srq_attr *attr = &init_attr->attr;
	u32 max_sge;

	max_sge = proc_srq_sge(hr_dev, srq, !!udata);
	if (attr->max_wr > hr_dev->caps.max_srq_wrs ||
	    attr->max_sge > max_sge) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid SRQ attr, depth = %u, sge = %u.\n",
			  attr->max_wr, attr->max_sge);
		return -EINVAL;
	}

	attr->max_wr = max_t(u32, attr->max_wr, HNS_ROCE_MIN_SRQ_WQE_NUM);
	srq->wqe_cnt = roundup_pow_of_two(attr->max_wr);
	srq->max_gs = roundup_pow_of_two(attr->max_sge + srq->rsv_sge);

	attr->max_wr = srq->wqe_cnt;
	attr->max_sge = srq->max_gs - srq->rsv_sge;
	attr->srq_limit = 0;

	return 0;
}

static void set_srq_ext_param(struct hns_roce_srq *srq,
			      struct ib_srq_init_attr *init_attr)
{
	srq->cqn = ib_srq_has_cq(init_attr->srq_type) ?
		   to_hr_cq(init_attr->ext.cq)->cqn : 0;

	srq->xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
		     to_hr_xrcd(init_attr->ext.xrc.xrcd)->xrcdn : 0;
}

static int set_srq_param(struct hns_roce_srq *srq,
			 struct ib_srq_init_attr *init_attr,
			 struct ib_udata *udata)
{
	int ret;

	ret = set_srq_basic_param(srq, init_attr, udata);
	if (ret)
		return ret;

	set_srq_ext_param(srq, init_attr);

	return 0;
}
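
/* Allocate all SRQ buffers: the index queue, the WQE buffer and, for
 * kernel SRQs only, the wrid array used to match completions to WRs.
 */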
static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
			 struct ib_udata *udata)
{
	struct hns_roce_ib_create_srq ucmd = {};
	int ret;

	if (udata) {
		ret = ib_copy_from_udata(&ucmd, udata,
					 min(udata->inlen, sizeof(ucmd)));
		if (ret) {
			ibdev_err(&hr_dev->ib_dev,
				  "failed to copy SRQ udata, ret = %d.\n",
				  ret);
			return ret;
		}
	}

	ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
	if (ret)
		return ret;

	ret = alloc_srq_wqe_buf(hr_dev, srq, udata, ucmd.buf_addr);
	if (ret)
		goto err_idx;

	if (!udata) {
		ret = alloc_srq_wrid(hr_dev, srq);
		if (ret)
			goto err_wqe_buf;
	}

	return 0;

err_wqe_buf:
	free_srq_wqe_buf(hr_dev, srq);
err_idx:
	free_srq_idx(hr_dev, srq);

	return ret;
}

static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	free_srq_wrid(srq);
	free_srq_wqe_buf(hr_dev, srq);
	free_srq_idx(hr_dev, srq);
}
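
/* Entry point for ib_create_srq(): derive the SRQ parameters, allocate
 * the queue buffers and the hardware context, and hand the SRQ number
 * back to userspace when a udata buffer is present.
 */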
int hns_roce_create_srq(struct ib_srq *ib_srq,
			struct ib_srq_init_attr *init_attr,
			struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_ib_create_srq_resp resp = {};
	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
	int ret;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);

	ret = set_srq_param(srq, init_attr, udata);
	if (ret)
		return ret;

	ret = alloc_srq_buf(hr_dev, srq, udata);
	if (ret)
		return ret;

	ret = alloc_srqc(hr_dev, srq);
	if (ret)
		goto err_srq_buf;

	if (udata) {
		resp.srqn = srq->srqn;
		if (ib_copy_to_udata(udata, &resp,
				     min(udata->outlen, sizeof(resp)))) {
			ret = -EFAULT;
			goto err_srqc;
		}
	}

	srq->db_reg = hr_dev->reg_base + SRQ_DB_REG;
	srq->event = hns_roce_ib_srq_event;
	refcount_set(&srq->refcount, 1);
	init_completion(&srq->free);

	return 0;

err_srqc:
	free_srqc(hr_dev, srq);
err_srq_buf:
	free_srq_buf(hr_dev, srq);

	return ret;
}
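
/* Entry point for ib_destroy_srq(): tear down the hardware context and
 * free the queue buffers.
 */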
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);

	free_srqc(hr_dev, srq);
	free_srq_buf(hr_dev, srq);

	return 0;
}
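
/* Initialize the SRQ table: the xarray used for srqn lookup and the IDA
 * that hands out SRQ numbers above the reserved range.
 */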
void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_ida *srq_ida = &srq_table->srq_ida;

	xa_init(&srq_table->xa);

	ida_init(&srq_ida->ida);
	srq_ida->max = hr_dev->caps.num_srqs - 1;
	srq_ida->min = hr_dev->caps.reserved_srqs;
}