/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"

static void flush_work_handle(struct work_struct *work)
{
	struct hns_roce_work *flush_work = container_of(work,
					struct hns_roce_work, work);
	struct hns_roce_qp *hr_qp = container_of(flush_work,
					struct hns_roce_qp, flush_work);
	struct device *dev = flush_work->hr_dev->dev;
	struct ib_qp_attr attr;
	int attr_mask;
	int ret;

	attr_mask = IB_QP_STATE;
	attr.qp_state = IB_QPS_ERR;

	if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
		ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
		if (ret)
			dev_err(dev, "modify QP to error state failed(%d) during CQE flush\n",
				ret);
	}

	/*
	 * Make sure we signal the QP destroy path that the flush has
	 * completed, so that it can safely proceed and destroy the QP.
	 */
	if (refcount_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
}

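/* Take an extra reference on the QP and queue the flush work. The reference
 * is dropped in flush_work_handle() once the QP has been moved to the error
 * state, so the QP cannot be freed while a flush is still pending.
 */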
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_work *flush_work = &hr_qp->flush_work;

	flush_work->hr_dev = hr_dev;
	INIT_WORK(&flush_work->work, flush_work_handle);
	refcount_inc(&hr_qp->refcount);
	queue_work(hr_dev->irq_workq, &flush_work->work);
}

void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
{
	/*
	 * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state
	 * gets into errored mode. Hence, as a workaround to this
	 * hardware limitation, the driver needs to assist in flushing. But
	 * the flushing operation uses a mailbox to convey the QP state to
	 * the hardware, which can sleep due to the mutex protection
	 * around the mailbox calls. Hence, use the deferred flush for
	 * now.
	 */
	if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
		init_flush_work(dev, qp);
}

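/* Dispatch an asynchronous hardware event for a QPN: look up the QP under
 * the xarray lock, move it to the error state (and trigger a CQE flush) for
 * fatal work queue errors, then forward the event to the QP's event handler.
 */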
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	xa_lock(&hr_dev->qp_table_xa);
	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		refcount_inc(&qp->refcount);
	xa_unlock(&hr_dev->qp_table_xa);

	if (!qp) {
		dev_warn(dev, "async event for bogus QP %08x\n", qpn);
		return;
	}

	if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
	    event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
	    event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
	    event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
	    event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) {
		qp->state = IB_QPS_ERR;

		flush_cqe(hr_dev, qp);
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (refcount_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_qp *ibqp = &hr_qp->ibqp;
	struct ib_event event;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
		case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

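/* A QP bank is affine to the CQ bank with the same upper id bits: shifting
 * the QP bank id right by one yields the matching CQ bank.
 * get_least_load_bankid_for_qp() picks the least loaded QP bank, restricted
 * to banks affine to the send CQ when one is supplied.
 */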
static u8 get_affinity_cq_bank(u8 qp_bank)
{
	return (qp_bank >> 1) & CQ_BANKID_MASK;
}

static u8 get_least_load_bankid_for_qp(struct ib_qp_init_attr *init_attr,
				       struct hns_roce_bank *bank)
{
#define INVALID_LOAD_QPNUM 0xFFFFFFFF
	struct ib_cq *scq = init_attr->send_cq;
	u32 least_load = INVALID_LOAD_QPNUM;
	unsigned long cqn = 0;
	u8 bankid = 0;
	u32 bankcnt;
	u8 i;

	if (scq)
		cqn = to_hr_cq(scq)->cqn;

	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
		if (scq && (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK)))
			continue;

		bankcnt = bank[i].inuse;
		if (bankcnt < least_load) {
			least_load = bankcnt;
			bankid = i;
		}
	}

	return bankid;
}

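/* Allocate an index from the bank's IDA, starting at bank->next so QPNs keep
 * increasing before wrapping back to bank->min. The resulting QPN interleaves
 * the banks: allocated index in the upper bits, bank id in the lower 3 bits.
 */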
static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
				 unsigned long *qpn)
{
	int id;

	id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL);
	if (id < 0) {
		id = ida_alloc_range(&bank->ida, bank->min, bank->max,
				     GFP_KERNEL);
		if (id < 0)
			return id;
	}

	/* the QPN should keep increasing until the max value is reached. */
	bank->next = (id + 1) > bank->max ? bank->min : id + 1;

	/* the lower 3 bits are the bankid */
	*qpn = (id << 3) | bankid;

	return 0;
}

static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		     struct ib_qp_init_attr *init_attr)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long num = 0;
	u8 bankid;
	int ret;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		num = 1;
	} else {
		mutex_lock(&qp_table->bank_mutex);
		bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank);

		ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
					    &num);
		if (ret) {
			ibdev_err(&hr_dev->ib_dev,
				  "failed to alloc QPN, ret = %d\n", ret);
			mutex_unlock(&qp_table->bank_mutex);
			return ret;
		}

		qp_table->bank[bankid].inuse++;
		mutex_unlock(&qp_table->bank_mutex);
	}

	hr_qp->qpn = num;

	return 0;
}

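/* Link the QP into the device QP list and into the SQ/RQ lists of its CQs,
 * holding both the device qp_list_lock and the CQ locks.
 */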
static void add_qp_to_list(struct hns_roce_dev *hr_dev,
			   struct hns_roce_qp *hr_qp,
			   struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
	struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
	unsigned long flags;

	hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
	hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);

	list_add_tail(&hr_qp->node, &hr_dev->qp_list);
	if (hr_send_cq)
		list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
	if (hr_recv_cq)
		list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);

	hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp,
			     struct ib_qp_init_attr *init_attr)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
	if (ret)
		dev_err(hr_dev->dev, "failed to xa store for QPC\n");
	else
		/* add QP to device's QP list for software CQE flushing */
		add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
			       init_attr->recv_cq);

	return ret;
}

static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "failed to get QPC table\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "failed to get IRRL table\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "failed to get TRRL table\n");
			goto err_put_irrl;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		/* Alloc memory for SCC CTX */
		ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "failed to get SCC CTX table\n");
			goto err_put_trrl;
		}
	}

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp)
{
	rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry);
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	unsigned long flags;

	list_del(&hr_qp->node);

	if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
		list_del(&hr_qp->sq_node);

	if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI &&
	    hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
		list_del(&hr_qp->rq_node);

	xa_lock_irqsave(xa, flags);
	__xa_erase(xa, hr_qp->qpn);
	xa_unlock_irqrestore(xa, flags);
}

static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}

static inline u8 get_qp_bankid(unsigned long qpn)
{
	/* The lower 3 bits of QPN are used to hash to different banks */
	return (u8)(qpn & GENMASK(2, 0));
}

static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	u8 bankid;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
		return;

	if (hr_qp->qpn < hr_dev->caps.reserved_qps)
		return;

	bankid = get_qp_bankid(hr_qp->qpn);

	ida_free(&hr_dev->qp_table.bank[bankid].ida,
		 hr_qp->qpn / HNS_ROCE_QP_BANK_NUM);

	mutex_lock(&hr_dev->qp_table.bank_mutex);
	hr_dev->qp_table.bank[bankid].inuse--;
	mutex_unlock(&hr_dev->qp_table.bank_mutex);
}

static u32 proc_rq_sge(struct hns_roce_dev *dev, struct hns_roce_qp *hr_qp,
		       bool user)
{
	u32 max_sge = dev->caps.max_rq_sg;

	if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return max_sge;

	/* Reserve SGEs only for HIP08 in kernel; The userspace driver will
	 * calculate number of max_sge with reserved SGEs when allocating wqe
	 * buf, so there is no need to do this again in kernel. But the number
	 * may exceed the capacity of SGEs recorded in the firmware, so the
	 * kernel driver should just adapt the value accordingly.
	 */
	if (user)
		max_sge = roundup_pow_of_two(max_sge + 1);
	else
		hr_qp->rq.rsv_sge = 1;

	return max_sge;
}

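/* Size the receive queue from the requested caps. With an SRQ (no RQ) all
 * values are zeroed; otherwise the WQE count is rounded up to a power of two
 * of at least min_wqes, max_gs includes the reserved SGE, and the resulting
 * values are written back into @cap.
 */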
static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
		       struct hns_roce_qp *hr_qp, int has_rq, bool user)
{
	u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user);
	u32 cnt;

	/* If srq exist, set zero for relative number of rq */
	if (!has_rq) {
		hr_qp->rq.wqe_cnt = 0;
		hr_qp->rq.max_gs = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;

		return 0;
	}

	/* Check the validity of QP support capacity */
	if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > max_sge) {
		ibdev_err(&hr_dev->ib_dev,
			  "RQ config error, depth = %u, sge = %u\n",
			  cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
			  cap->max_recv_wr);
		return -EINVAL;
	}

	hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
					      hr_qp->rq.rsv_sge);

	hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
				    hr_qp->rq.max_gs);

	hr_qp->rq.wqe_cnt = cnt;

	cap->max_recv_wr = cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;

	return 0;
}

static u32 get_max_inline_data(struct hns_roce_dev *hr_dev,
			       struct ib_qp_cap *cap)
{
	if (cap->max_inline_data) {
		cap->max_inline_data = roundup_pow_of_two(cap->max_inline_data);
		return min(cap->max_inline_data,
			   hr_dev->caps.max_sq_inline);
	}

	return 0;
}

static void update_inline_data(struct hns_roce_qp *hr_qp,
			       struct ib_qp_cap *cap)
{
	u32 sge_num = hr_qp->sq.ext_sge_cnt;

	if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) {
		if (!(hr_qp->ibqp.qp_type == IB_QPT_GSI ||
		      hr_qp->ibqp.qp_type == IB_QPT_UD))
			sge_num = max((u32)HNS_ROCE_SGE_IN_WQE, sge_num);

		cap->max_inline_data = max(cap->max_inline_data,
					   sge_num * HNS_ROCE_SGE_SIZE);
	}

	hr_qp->max_inline_data = cap->max_inline_data;
}

static u32 get_sge_num_from_max_send_sge(bool is_ud_or_gsi,
					 u32 max_send_sge)
{
	unsigned int std_sge_num;
	unsigned int min_sge;

	std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE;
	min_sge = is_ud_or_gsi ? 1 : 0;
	return max_send_sge > std_sge_num ? (max_send_sge - std_sge_num) :
				min_sge;
}

static unsigned int get_sge_num_from_max_inl_data(bool is_ud_or_gsi,
						  u32 max_inline_data)
{
	unsigned int inline_sge;

	inline_sge = roundup_pow_of_two(max_inline_data) / HNS_ROCE_SGE_SIZE;

	/*
	 * If max_inline_data is less than
	 * HNS_ROCE_SGE_IN_WQE * HNS_ROCE_SGE_SIZE, the standard SGEs in the
	 * WQE are enough and no extended SGEs are needed, except for UD/GSI
	 * QPs.
	 */
	if (!is_ud_or_gsi && inline_sge <= HNS_ROCE_SGE_IN_WQE)
		inline_sge = 0;

	return inline_sge;
}

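/* Work out the extended SGE layout of the SQ: how many SGEs spill out of the
 * standard WQE (and, with HNS_ROCE_EXSGE_FLAGS, how many are needed for
 * inline data), then derive sq.max_gs and the total sge.sge_cnt of the
 * extended SGE region.
 */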
static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
			      struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap)
{
	bool is_ud_or_gsi = (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
			     hr_qp->ibqp.qp_type == IB_QPT_UD);
	unsigned int std_sge_num;
	u32 inline_ext_sge = 0;
	u32 ext_wqe_sge_cnt;
	u32 total_sge_cnt;

	cap->max_inline_data = get_max_inline_data(hr_dev, cap);

	hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
	std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE;
	ext_wqe_sge_cnt = get_sge_num_from_max_send_sge(is_ud_or_gsi,
							cap->max_send_sge);

	if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) {
		inline_ext_sge = max(ext_wqe_sge_cnt,
				     get_sge_num_from_max_inl_data(is_ud_or_gsi,
							 cap->max_inline_data));
		hr_qp->sq.ext_sge_cnt = inline_ext_sge ?
					roundup_pow_of_two(inline_ext_sge) : 0;

		hr_qp->sq.max_gs = max(1U, (hr_qp->sq.ext_sge_cnt + std_sge_num));
		hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg);

		ext_wqe_sge_cnt = hr_qp->sq.ext_sge_cnt;
	} else {
		hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
		hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg);
		hr_qp->sq.ext_sge_cnt = hr_qp->sq.max_gs;
	}

	/* If the number of extended sge is not zero, they MUST use the
	 * space of HNS_HW_PAGE_SIZE at least.
	 */
	if (ext_wqe_sge_cnt) {
		total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * ext_wqe_sge_cnt);
		hr_qp->sge.sge_cnt = max(total_sge_cnt,
					 (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE);
	}

	update_inline_data(hr_qp, cap);
}

static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
					struct ib_qp_cap *cap,
					struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);

	/* Sanity check SQ size before proceeding */
	if (ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",
			  cap->max_send_sge);
		return -EINVAL;
	}

	return 0;
}

static int set_user_sq_size(struct hns_roce_dev *hr_dev,
			    struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
			    struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt = 0;
	int ret;

	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
	    cnt > hr_dev->caps.max_wqes)
		return -EINVAL;

	ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	set_ext_sge_param(hr_dev, cnt, hr_qp, cap);

	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
	hr_qp->sq.wqe_cnt = cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	return 0;
}

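/* Describe the WQE buffer as up to three regions in fixed order: SQ WQEs,
 * extended SGEs and RQ WQEs. The offsets are accumulated into hr_qp and the
 * per-region sizes and hop numbers are filled into @buf_attr for the MTR.
 */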
static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct hns_roce_buf_attr *buf_attr)
{
	int buf_size;
	int idx = 0;

	hr_qp->buff_size = 0;

	/* SQ WQE */
	hr_qp->sq.offset = 0;
	buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
					  hr_qp->sq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* extend SGE WQE in SQ */
	hr_qp->sge.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
					  hr_qp->sge.sge_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* RQ WQE */
	hr_qp->rq.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
					  hr_qp->rq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	if (hr_qp->buff_size < 1)
		return -EINVAL;

	buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
	buf_attr->region_count = idx;

	return 0;
}

static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
			      struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt;

	if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(ibdev, "failed to check SQ WR or SGE num.\n");
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n",
			  cnt);
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq.wqe_cnt = cnt;

	set_ext_sge_param(hr_dev, cnt, hr_qp, cap);

	/* sync the parameters of kernel QP to user's configuration */
	cap->max_send_wr = cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
		return 0;

	return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}

static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
		goto err_inline;
	}
	ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
				  PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
				  udata, addr);
	if (ret) {
		ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
		goto err_inline;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE;

	return 0;

err_inline:

	return ret;
}

static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
}

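/* Record doorbells are only used when the device supports them and, for user
 * QPs, when the udata buffers are large enough to carry the doorbell address
 * (ucmd) and the returned cap_flags (resp).
 */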
static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp,
				   struct hns_roce_ib_create_qp *ucmd)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_sq(init_attr) &&
		udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
}

static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_rq(init_attr));
}

static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
				     struct ib_qp_init_attr *init_attr)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
		hns_roce_qp_has_rq(init_attr));
}

static int qp_mmap_entry(struct hns_roce_qp *hr_qp,
			 struct hns_roce_dev *hr_dev,
			 struct ib_udata *udata,
			 struct hns_roce_ib_create_qp_resp *resp)
{
	struct hns_roce_ucontext *uctx =
		rdma_udata_to_drv_context(udata,
			struct hns_roce_ucontext, ibucontext);
	struct rdma_user_mmap_entry *rdma_entry;
	u64 address;

	address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE;

	hr_qp->dwqe_mmap_entry =
		hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address,
						HNS_ROCE_DWQE_SIZE,
						HNS_ROCE_MMAP_TYPE_DWQE);

	if (!hr_qp->dwqe_mmap_entry) {
		ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n");
		return -ENOMEM;
	}

	rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry;
	resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry);

	return 0;
}

static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata,
			    struct hns_roce_ib_create_qp *ucmd,
			    struct hns_roce_ib_create_qp_resp *resp)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
		struct hns_roce_ucontext, ibucontext);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
		ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to map user SQ doorbell, ret = %d.\n",
				  ret);
			goto err_out;
		}
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
	}

	if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
		ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to map user RQ doorbell, ret = %d.\n",
				  ret);
			goto err_sdb;
		}
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
	}

	return 0;

err_sdb:
	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
		hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
	return ret;
}

static int alloc_kernel_qp_db(struct hns_roce_dev *hr_dev,
			      struct hns_roce_qp *hr_qp,
			      struct ib_qp_init_attr *init_attr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		hr_qp->sq.db_reg = hr_dev->mem_base +
				   HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
	else
		hr_qp->sq.db_reg = hr_dev->reg_base + hr_dev->sdb_offset +
				   DB_REG_OFFSET * hr_dev->priv_uar.index;

	hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
			   DB_REG_OFFSET * hr_dev->priv_uar.index;

	if (kernel_qp_has_rdb(hr_dev, init_attr)) {
		ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc kernel RQ doorbell, ret = %d.\n",
				  ret);
			return ret;
		}
		*hr_qp->rdb.db_record = 0;
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
	}

	return 0;
}

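/* Set up the doorbells of a new QP: user QPs get a direct WQE mmap entry
 * (when enabled) plus user-mapped record doorbells, while kernel QPs get the
 * hardware doorbell registers and an optional kernel RQ record doorbell.
 */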
static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata,
		       struct hns_roce_ib_create_qp *ucmd,
		       struct hns_roce_ib_create_qp_resp *resp)
{
	int ret;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE)
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;

	if (udata) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) {
			ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp);
			if (ret)
				return ret;
		}

		ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd,
				       resp);
		if (ret)
			goto err_remove_qp;
	} else {
		ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);
		if (ret)
			return ret;
	}

	return 0;

err_remove_qp:
	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
		qp_user_mmap_entry_remove(hr_qp);

	return ret;
}

static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);

	if (udata) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
			qp_user_mmap_entry_remove(hr_qp);
	} else {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_free_db(hr_dev, &hr_qp->rdb);
	}
}

static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u64 *sq_wrid = NULL;
	u64 *rq_wrid = NULL;
	int ret;

	sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(sq_wrid)) {
		ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
		return -ENOMEM;
	}

	if (hr_qp->rq.wqe_cnt) {
		rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
		if (ZERO_OR_NULL_PTR(rq_wrid)) {
			ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
			ret = -ENOMEM;
			goto err_sq;
		}
	}

	hr_qp->sq.wrid = sq_wrid;
	hr_qp->rq.wrid = rq_wrid;
	return 0;
err_sq:
	kfree(sq_wrid);

	return ret;
}

static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
{
	kfree(hr_qp->rq.wrid);
	kfree(hr_qp->sq.wrid);
}

static void default_congest_type(struct hns_roce_dev *hr_dev,
				 struct hns_roce_qp *hr_qp)
{
	if (hr_qp->ibqp.qp_type == IB_QPT_UD ||
	    hr_qp->ibqp.qp_type == IB_QPT_GSI)
		hr_qp->cong_type = CONG_TYPE_DCQCN;
	else
		hr_qp->cong_type = hr_dev->caps.default_cong_type;
}

static int set_congest_type(struct hns_roce_qp *hr_qp,
			    struct hns_roce_ib_create_qp *ucmd)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);

	switch (ucmd->cong_type_flags) {
	case HNS_ROCE_CREATE_QP_FLAGS_DCQCN:
		hr_qp->cong_type = CONG_TYPE_DCQCN;
		break;
	case HNS_ROCE_CREATE_QP_FLAGS_LDCP:
		hr_qp->cong_type = CONG_TYPE_LDCP;
		break;
	case HNS_ROCE_CREATE_QP_FLAGS_HC3:
		hr_qp->cong_type = CONG_TYPE_HC3;
		break;
	case HNS_ROCE_CREATE_QP_FLAGS_DIP:
		hr_qp->cong_type = CONG_TYPE_DIP;
		break;
	default:
		return -EINVAL;
	}

	if (!test_bit(hr_qp->cong_type, (unsigned long *)&hr_dev->caps.cong_cap))
		return -EOPNOTSUPP;

	if (hr_qp->ibqp.qp_type == IB_QPT_UD &&
	    hr_qp->cong_type != CONG_TYPE_DCQCN)
		return -EOPNOTSUPP;

	return 0;
}

static int set_congest_param(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp,
			     struct hns_roce_ib_create_qp *ucmd)
{
	if (ucmd->comp_mask & HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE)
		return set_congest_type(hr_qp, ucmd);

	default_congest_type(hr_dev, hr_qp);

	return 0;
}

static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata,
			struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ucontext *uctx;
	int ret;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
	else
		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

	ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
			  hns_roce_qp_has_rq(init_attr), !!udata);
	if (ret) {
		ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	if (udata) {
		ret = ib_copy_from_udata(ucmd, udata,
					 min(udata->inlen, sizeof(*ucmd)));
		if (ret) {
			ibdev_err(ibdev,
				  "failed to copy QP ucmd, ret = %d\n", ret);
			return ret;
		}

		uctx = rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
						 ibucontext);
		hr_qp->config = uctx->config;
		ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
		if (ret)
			ibdev_err(ibdev,
				  "failed to set user SQ size, ret = %d.\n",
				  ret);

		ret = set_congest_param(hr_dev, hr_qp, ucmd);
		if (ret)
			return ret;
	} else {
		if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
			hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
		ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
		if (ret)
			ibdev_err(ibdev,
				  "failed to set kernel SQ size, ret = %d.\n",
				  ret);

		default_congest_type(hr_dev, hr_qp);
	}

	return ret;
}

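/* Common QP creation path: validate and set the QP parameters, then allocate
 * the WRID arrays (kernel QPs only), the WQE buffer, a QPN, the doorbells and
 * the QP context, store the QP in the xarray, and report the enabled
 * capabilities back to userspace.
 */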
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata,
				     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_ib_create_qp_resp resp = {};
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_qp ucmd = {};
	int ret;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;
	hr_qp->flush_flag = 0;

	if (init_attr->create_flags)
		return -EOPNOTSUPP;

	ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
		goto err_out;
	}

	if (!udata) {
		ret = alloc_kernel_wrid(hr_dev, hr_qp);
		if (ret) {
			ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
				  ret);
			goto err_out;
		}
	}

	ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
		goto err_buf;
	}

	ret = alloc_qpn(hr_dev, hr_qp, init_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
		goto err_qpn;
	}

	ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
			  ret);
		goto err_db;
	}

	ret = alloc_qpc(hr_dev, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
			  ret);
		goto err_qpc;
	}

	ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
		goto err_store;
	}

	if (udata) {
		resp.cap_flags = hr_qp->en_flags;
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret) {
			ibdev_err(ibdev, "copy qp resp failed!\n");
			goto err_flow_ctrl;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
		if (ret)
			goto err_flow_ctrl;
	}

	hr_qp->ibqp.qp_num = hr_qp->qpn;
	hr_qp->event = hns_roce_ib_qp_event;
	refcount_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_flow_ctrl:
	hns_roce_qp_remove(hr_dev, hr_qp);
err_store:
	free_qpc(hr_dev, hr_qp);
err_qpc:
	free_qp_db(hr_dev, hr_qp, udata);
err_db:
	free_qpn(hr_dev, hr_qp);
err_qpn:
	free_qp_buf(hr_dev, hr_qp);
err_buf:
	free_kernel_wrid(hr_qp);
err_out:
	mutex_destroy(&hr_qp->mutex);

	return ret;
}

void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata)
{
	if (refcount_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	free_qpc(hr_dev, hr_qp);
	free_qpn(hr_dev, hr_qp);
	free_qp_buf(hr_dev, hr_qp);
	free_kernel_wrid(hr_qp);
	free_qp_db(hr_dev, hr_qp, udata);
	mutex_destroy(&hr_qp->mutex);
}

static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
			 bool is_user)
{
	switch (type) {
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:
		if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
			goto out;
		break;
	case IB_QPT_UD:
		if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 &&
		    is_user)
			goto out;
		break;
	case IB_QPT_RC:
	case IB_QPT_GSI:
		break;
	default:
		goto out;
	}

	return 0;

out:
	ibdev_err(&hr_dev->ib_dev, "not support QP type %d\n", type);

	return -EOPNOTSUPP;
}

int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata)
{
	struct ib_device *ibdev = qp->device;
	struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
	struct hns_roce_qp *hr_qp = to_hr_qp(qp);
	int ret;

	ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
	if (ret)
		goto err_out;

	if (init_attr->qp_type == IB_QPT_XRC_TGT)
		hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;

	if (init_attr->qp_type == IB_QPT_GSI) {
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
	}

	ret = hns_roce_create_qp_common(hr_dev, init_attr, udata, hr_qp);
	if (ret)
		ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n",
			  init_attr->qp_type, ret);

err_out:
	if (ret)
		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_CREATE_ERR_CNT]);

	return ret;
}

int to_hr_qp_type(int qp_type)
{
	switch (qp_type) {
	case IB_QPT_RC:
		return SERV_TYPE_RC;
	case IB_QPT_UD:
	case IB_QPT_GSI:
		return SERV_TYPE_UD;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:
		return SERV_TYPE_XRC;
	default:
		return -1;
	}
}

static int check_mtu_validate(struct hns_roce_dev *hr_dev,
			      struct hns_roce_qp *hr_qp,
			      struct ib_qp_attr *attr, int attr_mask)
{
	enum ib_mtu active_mtu;
	int p;

	p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
	active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

	if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
	    attr->path_mtu > hr_dev->caps.max_mtu) ||
	    attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
		ibdev_err(&hr_dev->ib_dev,
			  "attr path_mtu(%d) invalid while modifying QP",
			  attr->path_mtu);
		return -EINVAL;
	}

	return 0;
}

static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				  int attr_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int p;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n",
			  attr->port_num);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			ibdev_err(&hr_dev->ib_dev,
				  "invalid attr, pkey_index = %u.\n",
				  attr->pkey_index);
			return -EINVAL;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr, max_rd_atomic = %u.\n",
			  attr->max_rd_atomic);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr, max_dest_rd_atomic = %u.\n",
			  attr->max_dest_rd_atomic);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_PATH_MTU)
		return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);

	return 0;
}

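/* Modify the QP under hr_qp->mutex: when a user QP is moved to the error
 * state, snapshot the producer indices from the record doorbells for the
 * flush, validate the state transition and attributes, then hand the change
 * to the hardware-specific modify_qp().
 */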
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ib_modify_qp_resp resp = {};
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int ret = -EINVAL;

	mutex_lock(&hr_qp->mutex);

	if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
		goto out;

	cur_state = hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->uobject &&
	    (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

			if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			ibdev_warn(&hr_dev->ib_dev,
				   "flush cqe is not supported in userspace!\n");
			goto out;
		}
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask)) {
		ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
	if (ret)
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET)
		goto out;

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state, udata);
	if (ret)
		goto out;

	if (udata && udata->outlen) {
		resp.tc_mode = hr_qp->tc_mode;
		resp.priority = hr_qp->sl;
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret)
			ibdev_err_ratelimited(&hr_dev->ib_dev,
					      "failed to copy modify qp resp.\n");
	}

out:
	mutex_unlock(&hr_qp->mutex);
	if (ret)
		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_MODIFY_ERR_CNT]);

	return ret;
}

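/* Lock the send and recv CQs of a QP in a fixed order (lower CQN first) so
 * that two QPs sharing CQs cannot deadlock. NULL CQs are only annotated for
 * sparse, and identical CQs are locked once.
 */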
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		spin_lock_irq(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		__release(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	} else if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset)
{
	return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}

void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}

bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->wqe_cnt))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->wqe_cnt;
}

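/* Initialize the QP table: QPNs reserved at the bottom of the range are
 * accounted as in use in their banks (raising each bank's minimum), and
 * every bank gets its own IDA with per-bank min/next/max bounds.
 */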
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned int reserved_from_bot;
	unsigned int i;

	qp_table->idx_table.spare_idx = kcalloc(hr_dev->caps.num_qps,
						sizeof(u32), GFP_KERNEL);
	if (!qp_table->idx_table.spare_idx)
		return -ENOMEM;

	mutex_init(&qp_table->scc_mutex);
	mutex_init(&qp_table->bank_mutex);
	xa_init(&hr_dev->qp_table_xa);

	reserved_from_bot = hr_dev->caps.reserved_qps;

	for (i = 0; i < reserved_from_bot; i++) {
		hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++;
		hr_dev->qp_table.bank[get_qp_bankid(i)].min++;
	}

	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
		ida_init(&hr_dev->qp_table.bank[i].ida);
		hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps /
					       HNS_ROCE_QP_BANK_NUM - 1;
		hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	int i;

	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
		ida_destroy(&hr_dev->qp_table.bank[i].ida);
	mutex_destroy(&hr_dev->qp_table.bank_mutex);
	mutex_destroy(&hr_dev->qp_table.scc_mutex);
	kfree(hr_dev->qp_table.idx_table.spare_idx);
}