/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)
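
/*
 * hns_roce_qp_event() - dispatch an asynchronous hardware event to a QP.
 *
 * Looks the QP up in the radix tree under qp_table->lock, takes a reference
 * so the QP cannot be freed while the event callback runs, and drops the
 * reference afterwards (completing ->free for a concurrent destroy).
 */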
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_event);
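
/*
 * hns_roce_ib_qp_event() - translate a hns_roce hardware event code into the
 * corresponding ib_event and deliver it through the QP's event handler, if
 * the consumer registered one.
 */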
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
				     int align, unsigned long *base)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
}
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}
EXPORT_SYMBOL_GPL(to_hns_roce_state);
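
/*
 * For HW v1 GSI QPs the QP context lives in the RoCE engine's registers, so
 * allocation here only records the QPN and publishes the QP in the radix
 * tree; no QPC/IRRL/TRRL table entries are reserved (contrast
 * hns_roce_qp_alloc() below).
 */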
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
		return ret;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;
}
static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
			     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "QPC table get failed\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "IRRL table get failed\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "TRRL table get failed\n");
			goto err_put_irrl;
		}
	}

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(dev, "QPC radix_tree_insert failed\n");
		goto err_put_trrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&hr_dev->qp_table_tree,
			  hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_remove);
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
		if (hr_dev->caps.trrl_entry_sz)
			hns_roce_table_put(hr_dev, &qp_table->trrl_table,
					   hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_qp_free);
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (base_qpn < SQP_NUM)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);
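
/*
 * hns_roce_set_rq_size() - validate the caller's receive-queue capabilities
 * and derive wqe_cnt, max_gs and wqe_shift for the RQ. When the QP is
 * attached to an SRQ the RQ is sized to zero, since receives then come from
 * the SRQ instead.
 */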
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
				struct ib_qp_cap *cap, int is_user, int has_srq,
				struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 max_cnt;

	/* Check the validity of QP support capacity */
	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
			cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	/* If an SRQ exists, set zero for the relative numbers of the RQ */
	if (has_srq) {
		if (cap->max_recv_wr) {
			dev_dbg(dev, "srq no need config max_recv_wr\n");
			return -EINVAL;
		}

		hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
	} else {
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
			dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
			return -EINVAL;
		}

		if (hr_dev->caps.min_wqes)
			max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
		else
			max_cnt = cap->max_recv_wr;

		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
			dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
			return -EINVAL;
		}

		max_cnt = max(1U, cap->max_recv_sge);
		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
		if (hr_dev->caps.max_rq_sg <= 2)
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz);
		else
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz
					      * hr_qp->rq.max_gs);
	}

	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}
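
/*
 * hns_roce_set_user_sq_size() - size the send queue of a userspace QP from
 * the values passed in the create-QP user command (log_sq_bb_count and
 * log_sq_stride), then lay out the SQ, extended SGE area and RQ inside one
 * page-aligned buffer and record the resulting offsets and buff_size.
 */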
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
				     struct ib_qp_cap *cap,
				     struct hns_roce_qp *hr_qp,
				     struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);
	u32 page_size;
	u32 max_cnt;

	/* Sanity check SQ size before proceeding */
	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
	    ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		dev_err(hr_dev->dev, "check SQ size error!\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
			cap->max_send_sge);
		return -EINVAL;
	}

	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2)
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							(hr_qp->sq.max_gs - 2));
	hr_qp->sge.sge_shift = 4;

	/* Get buf size, SQ and RQ are aligned to page_size */
	if (hr_dev->caps.max_sq_sg <= 2) {
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);

		hr_qp->sq.offset = 0;
		hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);
	} else {
		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
					     hr_qp->sge.sge_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), page_size);

		hr_qp->sq.offset = 0;
		if (hr_qp->sge.sge_cnt) {
			hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
							(hr_qp->sq.wqe_cnt <<
							hr_qp->sq.wqe_shift),
							page_size);
			hr_qp->rq.offset = hr_qp->sge.offset +
					HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
						hr_qp->sge.sge_shift),
						page_size);
		} else {
			hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
							(hr_qp->sq.wqe_cnt <<
							hr_qp->sq.wqe_shift),
							page_size);
		}
	}

	return 0;
}
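
/*
 * hns_roce_set_kernel_sq_size() - kernel-QP counterpart of the function
 * above: derive the SQ depth and SGE counts from the requested capabilities,
 * then compute the SQ, extended-SGE and RQ offsets and the total buffer size.
 */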
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
				       struct ib_qp_cap *cap,
				       struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 page_size;
	u32 max_cnt;
	int size;

	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		dev_err(dev, "SQ WR or sge or inline data error!\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq_max_wqes_per_wr = 1;
	hr_qp->sq_spare_wqes = 0;

	if (hr_dev->caps.min_wqes)
		max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
	else
		max_cnt = cap->max_send_wr;

	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
		dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
		return -EINVAL;
	}

	/* Get data_seg numbers */
	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							(hr_qp->sq.max_gs - 2));
		hr_qp->sge.sge_shift = 4;
	}

	/* ud sqwqe's sge use extend sge */
	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							hr_qp->sq.max_gs);
		hr_qp->sge.sge_shift = 4;
	}

	/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	hr_qp->sq.offset = 0;
	size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
				 page_size);

	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
		hr_qp->sge.offset = size;
		size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
					  hr_qp->sge.sge_shift, page_size);
	}

	hr_qp->rq.offset = size;
	size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
				  page_size);
	hr_qp->buff_size = size;

	/* Get wr and sge number which send */
	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}
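
/*
 * hns_roce_create_qp_common() - shared creation path for RC and GSI QPs.
 * Sizes the work queues, allocates the WQE buffer (pinning user memory via
 * ib_umem_get() for userspace QPs, or hns_roce_buf_alloc() for kernel QPs),
 * writes the MTT entries, reserves a QPN when no special QPN is forced, and
 * finally installs the QP context and the async event callback.
 */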
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata, unsigned long sqpn,
				     struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_ib_create_qp ucmd;
	unsigned long qpn = 0;
	int ret = 0;
	u32 page_shift;
	u32 npages;
	int i;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;

	hr_qp->ibqp.qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR);
	else
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);

	ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
				   !!init_attr->srq, hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_set_rq_size failed\n");
		goto err_out;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
		/* allocate recv inline buf */
		hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
					       sizeof(struct hns_roce_rinl_wqe),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list) {
			ret = -ENOMEM;
			goto err_out;
		}

		hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;

		/* Firstly, allocate a list of sge space buffer */
		hr_qp->rq_inl_buf.wqe_list[0].sg_list =
					kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
					       init_attr->cap.max_recv_sge *
					       sizeof(struct hns_roce_rinl_sge),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
			ret = -ENOMEM;
			goto err_wqe_list;
		}

		for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
			/* Secondly, point each wqe at its slice of the buffer */
			hr_qp->rq_inl_buf.wqe_list[i].sg_list =
				&hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
				init_attr->cap.max_recv_sge];
	}

	if (ib_pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "ib_copy_from_udata error for create qp\n");
			ret = -EFAULT;
			goto err_rq_sge_list;
		}

		ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
						&ucmd);
		if (ret) {
			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
			goto err_rq_sge_list;
		}

		hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
					  ucmd.buf_addr, hr_qp->buff_size, 0,
					  0);
		if (IS_ERR(hr_qp->umem)) {
			dev_err(dev, "ib_umem_get error for create qp\n");
			ret = PTR_ERR(hr_qp->umem);
			goto err_rq_sge_list;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		if (hr_dev->caps.mtt_buf_pg_sz) {
			npages = (ib_umem_page_count(hr_qp->umem) +
				  (1 << hr_dev->caps.mtt_buf_pg_sz) - 1) /
				  (1 << hr_dev->caps.mtt_buf_pg_sz);
			page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages,
						page_shift,
						&hr_qp->mtt);
		} else {
			ret = hns_roce_mtt_init(hr_dev,
						ib_umem_page_count(hr_qp->umem),
						hr_qp->umem->page_shift,
						&hr_qp->mtt);
		}
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for create qp\n");
			goto err_buf;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
						 hr_qp->umem);
		if (ret) {
			dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
			goto err_mtt;
		}
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		/* Set SQ size */
		ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
						  hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
			goto err_rq_sge_list;
		}

		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		/* Allocate QP buf */
		page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
		if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
				       (1 << page_shift) * 2,
				       &hr_qp->hr_buf, page_shift)) {
			dev_err(dev, "hns_roce_buf_alloc error!\n");
			ret = -ENOMEM;
			goto err_rq_sge_list;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		/* Write MTT */
		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
			goto err_buf;
		}

		ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
					     &hr_qp->hr_buf);
		if (ret) {
			dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
			goto err_mtt;
		}

		hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
			ret = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		/* Get QPN */
		ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
		if (ret) {
			dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
			goto err_wrid;
		}
	}

	if (init_attr->qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		/* In v1 engine, GSI QP context in RoCE engine's register */
		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	} else {
		ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	}

	if (sqpn)
		hr_qp->doorbell_qpn = 1;
	else
		hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

	hr_qp->event = hns_roce_ib_qp_event;

	return 0;

err_qpn:
	if (!sqpn)
		hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
	kfree(hr_qp->sq.wrid);
	kfree(hr_qp->rq.wrid);

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
	if (ib_pd->uobject)
		ib_umem_release(hr_qp->umem);
	else
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_rq_sge_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);

err_wqe_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list);

err_out:
	return ret;
}
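
/*
 * hns_roce_create_qp() - ib_device verb entry point. RC QPs get a QPN from
 * the bitmap; GSI (QP1) QPs are kernel-only and use a fixed per-port QPN
 * (a reserved SQP number on HW v1, QPN 1 otherwise).
 */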
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_sqp *hr_sqp;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC: {
		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
						hr_qp);
		if (ret) {
			dev_err(dev, "Create RC QP failed\n");
			kfree(hr_qp);
			return ERR_PTR(ret);
		}

		hr_qp->ibqp.qp_num = hr_qp->qpn;

		break;
	}
	case IB_QPT_GSI: {
		/* Userspace is not allowed to create special QPs: */
		if (pd->uobject) {
			dev_err(dev, "not support usr space GSI\n");
			return ERR_PTR(-EINVAL);
		}

		hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
		if (!hr_sqp)
			return ERR_PTR(-ENOMEM);

		hr_qp = &hr_sqp->hr_qp;
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

		/* when hw version is v1, the sqpn is allocated */
		if (hr_dev->caps.max_sq_sg <= 2)
			hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
					     hr_dev->iboe.phy_port[hr_qp->port];
		else
			hr_qp->ibqp.qp_num = 1;

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_qp->ibqp.qp_num, hr_qp);
		if (ret) {
			dev_err(dev, "Create GSI QP failed!\n");
			kfree(hr_sqp);
			return ERR_PTR(ret);
		}

		break;
	}
	default:
		dev_err(dev, "not support QP type %d\n", init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	return &hr_qp->ibqp;
}
EXPORT_SYMBOL_GPL(hns_roce_create_qp);
int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}
EXPORT_SYMBOL_GPL(to_hr_qp_type);
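
/*
 * hns_roce_modify_qp() - validate a state transition and the attributes that
 * accompany it (port, pkey index, path MTU against the netdev MTU, rd_atomic
 * limits), then hand the transition to the hardware-specific modify_qp hook.
 */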
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct device *dev = hr_dev->dev;
	int ret = -EINVAL;
	int p;
	enum ib_mtu active_mtu;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		    attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		dev_err(dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
			attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
				attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

		if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
		     attr->path_mtu > IB_MTU_4096) ||
		    (hr_dev->caps.max_mtu == IB_MTU_2048 &&
		     attr->path_mtu > IB_MTU_2048) ||
		    attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > active_mtu) {
			dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
				attr->path_mtu);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
			attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
			attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		ret = 0;
		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}
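
/*
 * hns_roce_lock_cqs()/hns_roce_unlock_cqs() - take both the send and receive
 * CQ locks. When the CQs differ they are always acquired in ascending cqn
 * order, so concurrent callers cannot deadlock on the pair.
 */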
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_lock_cqs);

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);
static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_recv_wqe);

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_send_wqe);

void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
					(n << hr_qp->sge.sge_shift));
}
EXPORT_SYMBOL_GPL(get_send_extend_sge);
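
/*
 * hns_roce_wq_overflow() - check whether posting nreq more WRs would overflow
 * the work queue. The outstanding count is re-read under the CQ lock so that
 * completions being reaped concurrently are taken into account before the
 * post is refused.
 */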
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->max_post))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->max_post;
}
EXPORT_SYMBOL_GPL(hns_roce_wq_overflow);
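
/*
 * hns_roce_init_qp_table() - initialise the QPN bitmap and the radix tree
 * used to look QPs up by QPN. The first SQP_NUM QPNs are reserved for the
 * special QPs (two per port).
 */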
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int ret;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

	/* Each port has two SQPs, so six ports reserve twelve in total */
	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1, SQP_NUM,
				   reserved_from_top);
	if (ret) {
		dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
			ret);
		return ret;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}