// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
#include <net/addrconf.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "siw.h"
#include "siw_verbs.h"
#include "siw_mem.h"
static int ib_qp_state_to_siw_qp_state[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = SIW_QP_STATE_IDLE,
	[IB_QPS_INIT] = SIW_QP_STATE_IDLE,
	[IB_QPS_RTR] = SIW_QP_STATE_RTR,
	[IB_QPS_RTS] = SIW_QP_STATE_RTS,
	[IB_QPS_SQD] = SIW_QP_STATE_CLOSING,
	[IB_QPS_SQE] = SIW_QP_STATE_TERMINATE,
	[IB_QPS_ERR] = SIW_QP_STATE_ERROR
};

static char ib_qp_state_to_string[IB_QPS_ERR + 1][sizeof("RESET")] = {
	[IB_QPS_RESET] = "RESET", [IB_QPS_INIT] = "INIT", [IB_QPS_RTR] = "RTR",
	[IB_QPS_RTS] = "RTS", [IB_QPS_SQD] = "SQD", [IB_QPS_SQE] = "SQE",
	[IB_QPS_ERR] = "ERR"
};
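/* Note: both tables are indexed with the IB QP state handed to
 * siw_verbs_modify_qp() below; they assume a valid enum ib_qp_state
 * value, i.e. an index no larger than IB_QPS_ERR.
 */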
void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct siw_user_mmap_entry *entry = to_siw_mmap_entry(rdma_entry);

	kfree(entry);
}
int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
{
	struct siw_ucontext *uctx = to_siw_ctx(ctx);
	size_t size = vma->vm_end - vma->vm_start;
	struct rdma_user_mmap_entry *rdma_entry;
	struct siw_user_mmap_entry *entry;
	int rv;

	/*
	 * Must be page aligned
	 */
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		pr_warn("siw: mmap not page aligned\n");
		return -EINVAL;
	}
	rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma);
	if (!rdma_entry) {
		siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n",
			vma->vm_pgoff, size);
		return -EINVAL;
	}
	entry = to_siw_mmap_entry(rdma_entry);

	rv = remap_vmalloc_range(vma, entry->address, 0);
	if (rv)
		pr_warn("remap_vmalloc_range failed: %lu, %zu\n", vma->vm_pgoff,
			size);

	rdma_user_mmap_entry_put(rdma_entry);

	return rv;
}
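/* Note on the mmap path: SQ, RQ, SRQ and CQ arrays for user QPs are
 * allocated with vmalloc_user() and announced to userspace via the keys
 * returned from siw_mmap_entry_insert(). Userspace is expected to pass
 * such a key as mmap() offset, which siw_mmap() above resolves back to
 * the vmalloc area and remaps into the caller's address space.
 */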
int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_ctx->device);
	struct siw_ucontext *ctx = to_siw_ctx(base_ctx);
	struct siw_uresp_alloc_ctx uresp = {};
	int rv;

	if (atomic_inc_return(&sdev->num_ctx) > SIW_MAX_CONTEXT) {
		rv = -ENOMEM;
		goto err_out;
	}
	ctx->sdev = sdev;
	uresp.dev_id = sdev->vendor_part_id;

	if (udata->outlen < sizeof(uresp)) {
		rv = -EINVAL;
		goto err_out;
	}
	rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rv)
		goto err_out;

	siw_dbg(base_ctx->device, "success. now %d context(s)\n",
		atomic_read(&sdev->num_ctx));
	return 0;

err_out:
	atomic_dec(&sdev->num_ctx);
	siw_dbg(base_ctx->device, "failure %d. now %d context(s)\n", rv,
		atomic_read(&sdev->num_ctx));
	return rv;
}
void siw_dealloc_ucontext(struct ib_ucontext *base_ctx)
{
	struct siw_ucontext *uctx = to_siw_ctx(base_ctx);

	atomic_dec(&uctx->sdev->num_ctx);
}
int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
		     struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	if (udata->inlen || udata->outlen)
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));

	/* Revisit atomic caps if RFC 7306 gets supported */
	attr->atomic_cap = 0;
	attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->kernel_cap_flags = IBK_ALLOW_USER_UNREG;
	attr->max_cq = sdev->attrs.max_cq;
	attr->max_cqe = sdev->attrs.max_cqe;
	attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;
	attr->max_mr = sdev->attrs.max_mr;
	attr->max_mw = sdev->attrs.max_mw;
	attr->max_mr_size = ~0ull;
	attr->max_pd = sdev->attrs.max_pd;
	attr->max_qp = sdev->attrs.max_qp;
	attr->max_qp_init_rd_atom = sdev->attrs.max_ird;
	attr->max_qp_rd_atom = sdev->attrs.max_ord;
	attr->max_qp_wr = sdev->attrs.max_qp_wr;
	attr->max_recv_sge = sdev->attrs.max_sge;
	attr->max_res_rd_atom = sdev->attrs.max_qp * sdev->attrs.max_ird;
	attr->max_send_sge = sdev->attrs.max_sge;
	attr->max_sge_rd = sdev->attrs.max_sge_rd;
	attr->max_srq = sdev->attrs.max_srq;
	attr->max_srq_sge = sdev->attrs.max_srq_sge;
	attr->max_srq_wr = sdev->attrs.max_srq_wr;
	attr->page_size_cap = PAGE_SIZE;
	attr->vendor_id = SIW_VENDOR_ID;
	attr->vendor_part_id = sdev->vendor_part_id;

	addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
			    sdev->netdev->dev_addr);

	return 0;
}
int siw_query_port(struct ib_device *base_dev, u32 port,
		   struct ib_port_attr *attr)
{
	struct siw_device *sdev = to_siw_dev(base_dev);
	int rv;

	memset(attr, 0, sizeof(*attr));

	rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
			      &attr->active_width);
	attr->gid_tbl_len = 1;
	attr->max_msg_sz = -1;
	attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
	attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
	attr->state = sdev->state;
	/*
	 * All zero
	 *
	 * attr->bad_pkey_cntr = 0;
	 * attr->qkey_viol_cntr = 0;
	 * attr->max_vl_num = 0;
	 * attr->subnet_timeout = 0;
	 * attr->init_type_reply = 0;
	 */
	return rv;
}
int siw_get_port_immutable(struct ib_device *base_dev, u32 port,
			   struct ib_port_immutable *port_immutable)
{
	struct ib_port_attr attr;
	int rv = siw_query_port(base_dev, port, &attr);

	if (rv)
		return rv;

	port_immutable->gid_tbl_len = attr.gid_tbl_len;
	port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	return 0;
}
int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
		  union ib_gid *gid)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	/* subnet_prefix == interface_id == 0 */
	memset(gid, 0, sizeof(*gid));
	memcpy(&gid->raw[0], sdev->netdev->dev_addr, 6);

	return 0;
}
int siw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);

	if (atomic_inc_return(&sdev->num_pd) > SIW_MAX_PD) {
		atomic_dec(&sdev->num_pd);
		return -ENOMEM;
	}
	siw_dbg_pd(pd, "now %d PD(s)\n", atomic_read(&sdev->num_pd));

	return 0;
}
int siw_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);

	siw_dbg_pd(pd, "free PD\n");
	atomic_dec(&sdev->num_pd);

	return 0;
}
void siw_qp_get_ref(struct ib_qp *base_qp)
{
	siw_qp_get(to_siw_qp(base_qp));
}

void siw_qp_put_ref(struct ib_qp *base_qp)
{
	siw_qp_put(to_siw_qp(base_qp));
}
static struct rdma_user_mmap_entry *
siw_mmap_entry_insert(struct siw_ucontext *uctx,
		      void *address, size_t length,
		      u64 *offset)
{
	struct siw_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int rv;

	*offset = SIW_INVAL_UOBJ_KEY;
	if (!entry)
		return NULL;

	entry->address = address;
	rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext,
					 &entry->rdma_entry, length);
	if (rv) {
		kfree(entry);
		return NULL;
	}
	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}
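/* Note: *offset is preset to SIW_INVAL_UOBJ_KEY so that callers may copy
 * the key to userspace unconditionally; that value is assumed to be
 * understood by userspace as "no mmap object available".
 */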
/*
 * siw_create_qp()
 *
 * Create QP of requested size on given device.
 *
 * @attrs:	Initial QP attributes.
 * @udata:	used to provide QP ID, SQ and RQ size back to user.
 */
int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
		  struct ib_udata *udata)
{
	struct ib_pd *pd = ibqp->pd;
	struct siw_qp *qp = to_siw_qp(ibqp);
	struct ib_device *base_dev = pd->device;
	struct siw_device *sdev = to_siw_dev(base_dev);
	struct siw_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	unsigned long flags;
	int num_sqe, num_rqe, rv = 0;
	size_t length;

	siw_dbg(base_dev, "create new QP\n");

	if (attrs->create_flags)
		return -EOPNOTSUPP;

	if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
		siw_dbg(base_dev, "too many QP's\n");
		rv = -ENOMEM;
		goto err_atomic;
	}
	if (attrs->qp_type != IB_QPT_RC) {
		siw_dbg(base_dev, "only RC QP's supported\n");
		rv = -EOPNOTSUPP;
		goto err_atomic;
	}
	if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_send_sge > SIW_MAX_SGE) ||
	    (attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
		siw_dbg(base_dev, "QP size error\n");
		rv = -EINVAL;
		goto err_atomic;
	}
	if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
		siw_dbg(base_dev, "max inline send: %d > %d\n",
			attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
		rv = -EINVAL;
		goto err_atomic;
	}
	/*
	 * NOTE: we allow for zero element SQ and RQ WQE's SGL's
	 * but not for a QP unable to hold any WQE (SQ + RQ)
	 */
	if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) {
		siw_dbg(base_dev, "QP must have send or receive queue\n");
		rv = -EINVAL;
		goto err_atomic;
	}
	if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
		siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
		rv = -EINVAL;
		goto err_atomic;
	}
	init_rwsem(&qp->state_lock);
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);
	spin_lock_init(&qp->orq_lock);

	rv = siw_qp_add(sdev, qp);
	if (rv)
		goto err_atomic;

	num_sqe = attrs->cap.max_send_wr;
	num_rqe = attrs->cap.max_recv_wr;

	/* All queue indices are derived from modulo operations
	 * on a free running 'get' (consumer) and 'put' (producer)
	 * unsigned counter. Having queue sizes at power of two
	 * avoids handling counter wrap around.
	 */
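	/* Example: with sq_size == 8 and a free running 32-bit 'put'
	 * counter, idx = put % 8 == put & 7 yields ..., 6, 7, 0, 1, ...
	 * across the 0xffffffff -> 0 wrap, so the producer/consumer
	 * indices stay continuous without explicit wrap handling.
	 */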
	if (num_sqe)
		num_sqe = roundup_pow_of_two(num_sqe);
	else {
		/* Zero sized SQ is not supported */
		rv = -EINVAL;
		goto err_out_xa;
	}
	if (num_rqe)
		num_rqe = roundup_pow_of_two(num_rqe);

	if (udata)
		qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
	else
		qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));

	if (qp->sendq == NULL) {
		rv = -ENOMEM;
		goto err_out_xa;
	}
	if (attrs->sq_sig_type != IB_SIGNAL_REQ_WR) {
		if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
			qp->attrs.flags |= SIW_SIGNAL_ALL_WR;
		else {
			rv = -EINVAL;
			goto err_out_xa;
		}
	}
	qp->scq = to_siw_cq(attrs->send_cq);
	qp->rcq = to_siw_cq(attrs->recv_cq);

	if (attrs->srq) {
		/*
		 * Verbs 6.3.7: ignore RQ size, if SRQ present
		 * Verbs 6.3.5: do not check PD of SRQ against PD of QP
		 */
		qp->srq = to_siw_srq(attrs->srq);
		qp->attrs.rq_size = 0;
		siw_dbg(base_dev, "QP [%u]: SRQ attached\n",
			qp_id(qp));
	} else if (num_rqe) {
		if (udata)
			qp->recvq =
				vmalloc_user(num_rqe * sizeof(struct siw_rqe));
		else
			qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));

		if (qp->recvq == NULL) {
			rv = -ENOMEM;
			goto err_out_xa;
		}
		qp->attrs.rq_size = num_rqe;
	}
	qp->attrs.sq_size = num_sqe;
	qp->attrs.sq_max_sges = attrs->cap.max_send_sge;
	qp->attrs.rq_max_sges = attrs->cap.max_recv_sge;

	/* Make those two tunables fixed for now. */
	qp->tx_ctx.gso_seg_limit = 1;
	qp->tx_ctx.zcopy_tx = zcopy_tx;

	qp->attrs.state = SIW_QP_STATE_IDLE;

	if (udata) {
		struct siw_uresp_create_qp uresp = {};

		uresp.num_sqe = num_sqe;
		uresp.num_rqe = num_rqe;
		uresp.qp_id = qp_id(qp);

		if (qp->sendq) {
			length = num_sqe * sizeof(struct siw_sqe);
			qp->sq_entry =
				siw_mmap_entry_insert(uctx, qp->sendq,
						      length, &uresp.sq_key);
			if (!qp->sq_entry) {
				rv = -ENOMEM;
				goto err_out_xa;
			}
		}
		if (qp->recvq) {
			length = num_rqe * sizeof(struct siw_rqe);
			qp->rq_entry =
				siw_mmap_entry_insert(uctx, qp->recvq,
						      length, &uresp.rq_key);
			if (!qp->rq_entry) {
				uresp.sq_key = SIW_INVAL_UOBJ_KEY;
				rv = -ENOMEM;
				goto err_out_xa;
			}
		}
		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out_xa;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out_xa;
	}
	qp->tx_cpu = siw_get_tx_cpu(sdev);
	if (qp->tx_cpu < 0) {
		rv = -EINVAL;
		goto err_out_xa;
	}
	INIT_LIST_HEAD(&qp->devq);
	spin_lock_irqsave(&sdev->lock, flags);
	list_add_tail(&qp->devq, &sdev->qp_list);
	spin_unlock_irqrestore(&sdev->lock, flags);

	init_completion(&qp->qp_free);

	return 0;

err_out_xa:
	xa_erase(&sdev->qp_xa, qp_id(qp));
	if (uctx) {
		rdma_user_mmap_entry_remove(qp->sq_entry);
		rdma_user_mmap_entry_remove(qp->rq_entry);
	}
	vfree(qp->sendq);
	vfree(qp->recvq);
err_atomic:
	atomic_dec(&sdev->num_qp);
	return rv;
}
/*
 * Minimum siw_query_qp() verb interface.
 *
 * @qp_attr_mask is not used but all available information is provided
 */
int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct siw_qp *qp;
	struct siw_device *sdev;

	if (base_qp && qp_attr && qp_init_attr) {
		qp = to_siw_qp(base_qp);
		sdev = to_siw_dev(base_qp->device);
	} else {
		return -EINVAL;
	}
	qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
	qp_attr->cap.max_send_wr = qp->attrs.sq_size;
	qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
	qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
	qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
	qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	qp_attr->max_rd_atomic = qp->attrs.irq_size;
	qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;

	qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ;

	qp_init_attr->qp_type = base_qp->qp_type;
	qp_init_attr->send_cq = base_qp->send_cq;
	qp_init_attr->recv_cq = base_qp->recv_cq;
	qp_init_attr->srq = base_qp->srq;

	qp_init_attr->cap = qp_attr->cap;

	return 0;
}
int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr,
			int attr_mask, struct ib_udata *udata)
{
	struct siw_qp_attrs new_attrs;
	enum siw_qp_attr_mask siw_attr_mask = 0;
	struct siw_qp *qp = to_siw_qp(base_qp);
	int rv = 0;

	if (!attr_mask)
		return 0;

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	memset(&new_attrs, 0, sizeof(new_attrs));

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		siw_attr_mask = SIW_QP_ATTR_ACCESS_FLAGS;

		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			new_attrs.flags |= SIW_RDMA_READ_ENABLED;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			new_attrs.flags |= SIW_RDMA_WRITE_ENABLED;
		if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
			new_attrs.flags |= SIW_RDMA_BIND_ENABLED;
	}
	if (attr_mask & IB_QP_STATE) {
		siw_dbg_qp(qp, "desired IB QP state: %s\n",
			   ib_qp_state_to_string[attr->qp_state]);

		new_attrs.state = ib_qp_state_to_siw_qp_state[attr->qp_state];

		if (new_attrs.state > SIW_QP_STATE_RTS)
			qp->tx_ctx.tx_suspend = 1;

		siw_attr_mask |= SIW_QP_ATTR_STATE;
	}
	if (!siw_attr_mask)
		goto out;

	down_write(&qp->state_lock);

	rv = siw_qp_modify(qp, &new_attrs, siw_attr_mask);

	up_write(&qp->state_lock);
out:
	return rv;
}
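/* Note: tx_ctx.tx_suspend is raised before siw_qp_modify() is called; it is
 * intended to stop the transmit path from issuing further work on the
 * socket once the QP leaves RTS, while the actual state transition happens
 * under qp->state_lock in siw_qp_modify().
 */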
int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	struct siw_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	struct siw_qp_attrs qp_attrs;

	siw_dbg_qp(qp, "state %d\n", qp->attrs.state);

	/*
	 * Mark QP as in process of destruction to prevent from
	 * any async callbacks to RDMA core
	 */
	qp->attrs.flags |= SIW_QP_IN_DESTROY;
	qp->rx_stream.rx_suspend = 1;

	if (uctx) {
		rdma_user_mmap_entry_remove(qp->sq_entry);
		rdma_user_mmap_entry_remove(qp->rq_entry);
	}

	down_write(&qp->state_lock);

	qp_attrs.state = SIW_QP_STATE_ERROR;
	siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE);

	if (qp->cep) {
		siw_cep_put(qp->cep);
		qp->cep = NULL;
	}
	up_write(&qp->state_lock);

	kfree(qp->tx_ctx.mpa_crc_hd);
	kfree(qp->rx_stream.mpa_crc_hd);

	qp->scq = qp->rcq = NULL;

	siw_qp_put(qp);
	wait_for_completion(&qp->qp_free);

	return 0;
}
/*
 * siw_copy_inline_sgl()
 *
 * Prepare sgl of inlined data for sending. For userland callers, the
 * function checks whether the given buffer addresses and lengths are
 * within process context bounds.
 * Data from all provided sge's is copied together into the wqe,
 * referenced by a single sge.
 */
static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
			       struct siw_sqe *sqe)
{
	struct ib_sge *core_sge = core_wr->sg_list;
	void *kbuf = &sqe->sge[1];
	int num_sge = core_wr->num_sge, bytes = 0;

	sqe->sge[0].laddr = (uintptr_t)kbuf;
	sqe->sge[0].lkey = 0;

	while (num_sge--) {
		if (!core_sge->length) {
			core_sge++;
			continue;
		}
		bytes += core_sge->length;
		if (bytes > SIW_MAX_INLINE) {
			bytes = -EINVAL;
			break;
		}
		memcpy(kbuf, ib_virt_dma_to_ptr(core_sge->addr),
		       core_sge->length);

		kbuf += core_sge->length;
		core_sge++;
	}
	sqe->sge[0].length = max(bytes, 0);
	sqe->num_sge = bytes > 0 ? 1 : 0;

	return bytes;
}
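/* Note: inline payload is stashed directly into the SQE's own SGE array
 * starting at sge[1], with sge[0] describing that buffer. This is why
 * SIW_MAX_INLINE bounds the total inline size and why the result is always
 * reported back as a single SGE.
 */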
/* Complete SQ WR's without processing */
static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
			   const struct ib_send_wr **bad_wr)
{
	int rv = 0;

	while (wr) {
		struct siw_sqe sqe = {};

		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE:
			sqe.opcode = SIW_OP_WRITE;
			break;
		case IB_WR_RDMA_READ:
			sqe.opcode = SIW_OP_READ;
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			sqe.opcode = SIW_OP_READ_LOCAL_INV;
			break;
		case IB_WR_SEND:
			sqe.opcode = SIW_OP_SEND;
			break;
		case IB_WR_SEND_WITH_IMM:
			sqe.opcode = SIW_OP_SEND_WITH_IMM;
			break;
		case IB_WR_SEND_WITH_INV:
			sqe.opcode = SIW_OP_SEND_REMOTE_INV;
			break;
		case IB_WR_LOCAL_INV:
			sqe.opcode = SIW_OP_INVAL_STAG;
			break;
		case IB_WR_REG_MR:
			sqe.opcode = SIW_OP_REG_MR;
			break;
		default:
			rv = -EINVAL;
			break;
		}
		if (!rv) {
			sqe.id = wr->wr_id;
			rv = siw_sqe_complete(qp, &sqe, 0,
					      SIW_WC_WR_FLUSH_ERR);
		}
		if (rv) {
			if (bad_wr)
				*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return rv;
}
/* Complete RQ WR's without processing */
static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
			   const struct ib_recv_wr **bad_wr)
{
	struct siw_rqe rqe = {};
	int rv = 0;

	while (wr) {
		rqe.id = wr->wr_id;
		rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR);
		if (rv) {
			if (bad_wr)
				*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return rv;
}
/*
 * siw_post_send()
 *
 * Post a list of S-WR's to a SQ.
 *
 * @base_qp:	Base QP contained in siw QP
 * @wr:		Null terminated list of user WR's
 * @bad_wr:	Points to failing WR in case of synchronous failure.
 */
int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
		  const struct ib_send_wr **bad_wr)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	struct siw_wqe *wqe = tx_wqe(qp);

	unsigned long flags;
	int rv = 0;

	if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
		siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	/*
	 * Try to acquire QP state lock. Must be non-blocking
	 * to accommodate kernel clients needs.
	 */
	if (!down_read_trylock(&qp->state_lock)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * ERROR state is final, so we can be sure
			 * this state will not change as long as the QP
			 * exists.
			 *
			 * This handles an ib_drain_sq() call with
			 * a concurrent request to set the QP state
			 * to ERROR.
			 */
			rv = siw_sq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP locked, state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		return rv;
	}
	if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * Immediately flush this WR to CQ, if QP
			 * is in ERROR state. SQ is guaranteed to
			 * be empty, so WR completes in-order.
			 *
			 * Typically triggered by ib_drain_sq().
			 */
			rv = siw_sq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP out of state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		up_read(&qp->state_lock);
		return rv;
	}
	spin_lock_irqsave(&qp->sq_lock, flags);

	while (wr) {
		u32 idx = qp->sq_put % qp->attrs.sq_size;
		struct siw_sqe *sqe = &qp->sendq[idx];

		if (sqe->flags) {
			siw_dbg_qp(qp, "sq full\n");
			rv = -ENOMEM;
			break;
		}
		if (wr->num_sge > qp->attrs.sq_max_sges) {
			siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		sqe->id = wr->wr_id;

		if ((wr->send_flags & IB_SEND_SIGNALED) ||
		    (qp->attrs.flags & SIW_SIGNAL_ALL_WR))
			sqe->flags |= SIW_WQE_SIGNALLED;

		if (wr->send_flags & IB_SEND_FENCE)
			sqe->flags |= SIW_WQE_READ_FENCE;

		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_INV:
			if (wr->send_flags & IB_SEND_SOLICITED)
				sqe->flags |= SIW_WQE_SOLICITED;

			if (!(wr->send_flags & IB_SEND_INLINE)) {
				siw_copy_sgl(wr->sg_list, sqe->sge,
					     wr->num_sge);
				sqe->num_sge = wr->num_sge;
			} else {
				rv = siw_copy_inline_sgl(wr, sqe);
				if (rv <= 0) {
					rv = -EINVAL;
					break;
				}
				sqe->flags |= SIW_WQE_INLINE;
				sqe->num_sge = 1;
			}
			if (wr->opcode == IB_WR_SEND)
				sqe->opcode = SIW_OP_SEND;
			else {
				sqe->opcode = SIW_OP_SEND_REMOTE_INV;
				sqe->rkey = wr->ex.invalidate_rkey;
			}
			break;

		case IB_WR_RDMA_READ_WITH_INV:
		case IB_WR_RDMA_READ:
			/*
			 * iWarp restricts RREAD sink to SGL containing
			 * 1 SGE only. we could relax to SGL with multiple
			 * elements referring the SAME ltag or even sending
			 * a private per-rreq tag referring to a checked
			 * local sgl with MULTIPLE ltag's.
			 */
			if (unlikely(wr->num_sge != 1)) {
				rv = -EINVAL;
				break;
			}
			siw_copy_sgl(wr->sg_list, &sqe->sge[0], 1);
			/*
			 * NOTE: zero length RREAD is allowed!
			 */
			sqe->raddr = rdma_wr(wr)->remote_addr;
			sqe->rkey = rdma_wr(wr)->rkey;
			sqe->num_sge = 1;

			if (wr->opcode == IB_WR_RDMA_READ)
				sqe->opcode = SIW_OP_READ;
			else
				sqe->opcode = SIW_OP_READ_LOCAL_INV;
			break;

		case IB_WR_RDMA_WRITE:
			if (!(wr->send_flags & IB_SEND_INLINE)) {
				siw_copy_sgl(wr->sg_list, &sqe->sge[0],
					     wr->num_sge);
				sqe->num_sge = wr->num_sge;
			} else {
				rv = siw_copy_inline_sgl(wr, sqe);
				if (unlikely(rv < 0)) {
					rv = -EINVAL;
					break;
				}
				sqe->flags |= SIW_WQE_INLINE;
				sqe->num_sge = 1;
			}
			sqe->raddr = rdma_wr(wr)->remote_addr;
			sqe->rkey = rdma_wr(wr)->rkey;
			sqe->opcode = SIW_OP_WRITE;
			break;

		case IB_WR_REG_MR:
			sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
			sqe->rkey = reg_wr(wr)->key;
			sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
			sqe->opcode = SIW_OP_REG_MR;
			break;

		case IB_WR_LOCAL_INV:
			sqe->rkey = wr->ex.invalidate_rkey;
			sqe->opcode = SIW_OP_INVAL_STAG;
			break;

		default:
			siw_dbg_qp(qp, "ib wr type %d unsupported\n",
				   wr->opcode);
			rv = -EINVAL;
			break;
		}
		siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
			   sqe->opcode, sqe->flags,
			   (void *)(uintptr_t)sqe->id);

		if (unlikely(rv < 0))
			break;

		/* make SQE only valid after completely written */
		smp_wmb();
		sqe->flags |= SIW_WQE_VALID;
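		/* The smp_wmb() above orders all prior SQE stores before the
		 * VALID flag, so SQ processing never sees a half-written SQE;
		 * a matching read-side ordering is assumed on the consumer
		 * path in siw_activate_tx().
		 */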
		qp->sq_put++;
		wr = wr->next;
	}

	/*
	 * Send directly if SQ processing is not in progress.
	 * Eventual immediate errors (rv < 0) do not affect the involved
	 * RI resources (Verbs, 8.3.1) and thus do not prevent from SQ
	 * processing, if new work is already pending. But rv must be passed
	 * to caller.
	 */
	if (wqe->wr_status != SIW_WR_IDLE) {
		spin_unlock_irqrestore(&qp->sq_lock, flags);
		goto skip_direct_sending;
	}
	rv = siw_activate_tx(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);

	if (rv <= 0)
		goto skip_direct_sending;

	if (rdma_is_kernel_res(&qp->base_qp.res)) {
		rv = siw_sq_start(qp);
	} else {
		qp->tx_ctx.in_syscall = 1;

		if (siw_qp_sq_process(qp) != 0 && !(qp->tx_ctx.tx_suspend))
			siw_qp_cm_drop(qp, 0);

		qp->tx_ctx.in_syscall = 0;
	}
skip_direct_sending:

	up_read(&qp->state_lock);

	if (rv >= 0)
		return 0;
	/*
	 * Immediate error
	 */
	siw_dbg_qp(qp, "error %d\n", rv);

	*bad_wr = wr;
	return rv;
}
/*
 * siw_post_receive()
 *
 * Post a list of R-WR's to a RQ.
 *
 * @base_qp:	Base QP contained in siw QP
 * @wr:		Null terminated list of user WR's
 * @bad_wr:	Points to failing WR in case of synchronous failure.
 */
int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
		     const struct ib_recv_wr **bad_wr)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	unsigned long flags;
	int rv = 0;

	if (qp->srq || qp->attrs.rq_size == 0) {
		*bad_wr = wr;
		return -EINVAL;
	}
	if (!rdma_is_kernel_res(&qp->base_qp.res)) {
		siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	/*
	 * Try to acquire QP state lock. Must be non-blocking
	 * to accommodate kernel clients needs.
	 */
	if (!down_read_trylock(&qp->state_lock)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * ERROR state is final, so we can be sure
			 * this state will not change as long as the QP
			 * exists.
			 *
			 * This handles an ib_drain_rq() call with
			 * a concurrent request to set the QP state
			 * to ERROR.
			 */
			rv = siw_rq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP locked, state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		return rv;
	}
	if (qp->attrs.state > SIW_QP_STATE_RTS) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * Immediately flush this WR to CQ, if QP
			 * is in ERROR state. RQ is guaranteed to
			 * be empty, so WR completes in-order.
			 *
			 * Typically triggered by ib_drain_rq().
			 */
			rv = siw_rq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP out of state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		up_read(&qp->state_lock);
		return rv;
	}
	/*
	 * Serialize potentially multiple producers.
	 * Not needed for single threaded consumer side.
	 */
	spin_lock_irqsave(&qp->rq_lock, flags);

	while (wr) {
		u32 idx = qp->rq_put % qp->attrs.rq_size;
		struct siw_rqe *rqe = &qp->recvq[idx];

		if (rqe->flags) {
			siw_dbg_qp(qp, "RQ full\n");
			rv = -ENOMEM;
			break;
		}
		if (wr->num_sge > qp->attrs.rq_max_sges) {
			siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		rqe->id = wr->wr_id;
		rqe->num_sge = wr->num_sge;
		siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);

		/* make sure RQE is completely written before valid */
		smp_wmb();

		rqe->flags = SIW_WQE_VALID;

		qp->rq_put++;
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->rq_lock, flags);

	up_read(&qp->state_lock);

	if (rv < 0) {
		siw_dbg_qp(qp, "error %d\n", rv);
		*bad_wr = wr;
	}
	return rv > 0 ? 0 : rv;
}
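/* Note: user mapped QPs post their RQEs directly into the shared receive
 * queue from userspace, which is why siw_post_receive() above rejects
 * kernel post_recv for such QPs.
 */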
int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
{
	struct siw_cq *cq = to_siw_cq(base_cq);
	struct siw_device *sdev = to_siw_dev(base_cq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);

	siw_dbg_cq(cq, "free CQ resources\n");

	siw_cq_flush(cq);

	if (ctx)
		rdma_user_mmap_entry_remove(cq->cq_entry);

	atomic_dec(&sdev->num_cq);

	vfree(cq->queue);

	return 0;
}
/*
 * siw_create_cq()
 *
 * Populate CQ of requested size
 *
 * @base_cq: CQ as allocated by RDMA midlayer
 * @attr: Initial CQ attributes
 * @udata: relates to user context
 */
int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_cq->device);
	struct siw_cq *cq = to_siw_cq(base_cq);
	int rv, size = attr->cqe;

	if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) {
		siw_dbg(base_cq->device, "too many CQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (size < 1 || size > sdev->attrs.max_cqe) {
		siw_dbg(base_cq->device, "CQ size error: %d\n", size);
		rv = -EINVAL;
		goto err_out;
	}
	size = roundup_pow_of_two(size);
	cq->base_cq.cqe = size;
	cq->num_cqe = size;

	if (udata)
		cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
					 sizeof(struct siw_cq_ctrl));
	else
		cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
				    sizeof(struct siw_cq_ctrl));

	if (cq->queue == NULL) {
		rv = -ENOMEM;
		goto err_out;
	}
	get_random_bytes(&cq->id, 4);
	siw_dbg(base_cq->device, "new CQ [%u]\n", cq->id);

	spin_lock_init(&cq->lock);

	cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];

	if (udata) {
		struct siw_uresp_create_cq uresp = {};
		struct siw_ucontext *ctx =
			rdma_udata_to_drv_context(udata, struct siw_ucontext,
						  base_ucontext);
		size_t length = size * sizeof(struct siw_cqe) +
			sizeof(struct siw_cq_ctrl);

		cq->cq_entry =
			siw_mmap_entry_insert(ctx, cq->queue,
					      length, &uresp.cq_key);
		if (!cq->cq_entry) {
			rv = -ENOMEM;
			goto err_out;
		}

		uresp.cq_id = cq->id;
		uresp.num_cqe = size;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	return 0;

err_out:
	siw_dbg(base_cq->device, "CQ creation failed: %d", rv);

	if (cq->queue) {
		struct siw_ucontext *ctx =
			rdma_udata_to_drv_context(udata, struct siw_ucontext,
						  base_ucontext);
		if (ctx)
			rdma_user_mmap_entry_remove(cq->cq_entry);
		vfree(cq->queue);
	}
	atomic_dec(&sdev->num_cq);

	return rv;
}
/*
 * siw_poll_cq()
 *
 * Reap CQ entries if available and copy work completion status into
 * array of WC's provided by caller. Returns number of reaped CQE's.
 *
 * @base_cq:	Base CQ contained in siw CQ.
 * @num_cqe:	Maximum number of CQE's to reap.
 * @wc:		Array of work completions to be filled by siw.
 */
int siw_poll_cq(struct ib_cq *base_cq, int num_cqe, struct ib_wc *wc)
{
	struct siw_cq *cq = to_siw_cq(base_cq);
	int i;

	for (i = 0; i < num_cqe; i++) {
		if (!siw_reap_cqe(cq, wc))
			break;
		wc++;
	}
	return i;
}
/*
 * siw_req_notify_cq()
 *
 * Request notification for new CQE's added to that CQ.
 * Defined flags:
 * o SIW_CQ_NOTIFY_SOLICITED lets siw trigger a notification
 *   event if a WQE with notification flag set enters the CQ
 * o SIW_CQ_NOTIFY_NEXT_COMP lets siw trigger a notification
 *   event if a WQE enters the CQ.
 * o IB_CQ_REPORT_MISSED_EVENTS: return value will provide the
 *   number of not reaped CQE's regardless of its notification
 *   type and current or new CQ notification settings.
 *
 * @base_cq:	Base CQ contained in siw CQ.
 * @flags:	Requested notification flags.
 */
int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
{
	struct siw_cq *cq = to_siw_cq(base_cq);

	siw_dbg_cq(cq, "flags: 0x%02x\n", flags);

	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		/*
		 * Enable CQ event for next solicited completion
		 * and make it visible to all associated producers.
		 */
		smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
	else
		/*
		 * Enable CQ event for any signalled completion
		 * and make it visible to all associated producers.
		 */
		smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);

	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
		return cq->cq_put - cq->cq_get;

	return 0;
}
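/* Note: cq_put and cq_get are free running producer/consumer counters, so
 * cq_put - cq_get is the number of CQEs not yet reaped. A non-zero return
 * with IB_CQ_REPORT_MISSED_EVENTS tells the caller to poll the CQ again
 * before relying on the next completion event.
 */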
/*
 * siw_dereg_mr()
 *
 * Release Memory Region.
 *
 * @base_mr: Base MR contained in siw MR.
 * @udata: points to user context, unused.
 */
int siw_dereg_mr(struct ib_mr *base_mr, struct ib_udata *udata)
{
	struct siw_mr *mr = to_siw_mr(base_mr);
	struct siw_device *sdev = to_siw_dev(base_mr->device);

	siw_dbg_mem(mr->mem, "deregister MR\n");

	atomic_dec(&sdev->num_mr);

	siw_mr_drop_mem(mr);
	kfree_rcu(mr, rcu);

	return 0;
}
/*
 * siw_reg_user_mr()
 *
 * Register Memory Region.
 *
 * @pd:		Protection Domain
 * @start:	starting address of MR (virtual address)
 * @len:	len of MR
 * @rnic_va:	not used by siw
 * @rights:	MR access rights
 * @udata:	user buffer to communicate STag and Key.
 */
struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
			      u64 rnic_va, int rights, struct ib_udata *udata)
{
	struct siw_mr *mr = NULL;
	struct siw_umem *umem = NULL;
	struct siw_ureq_reg_mr ureq;
	struct siw_device *sdev = to_siw_dev(pd->device);
	unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
	int rv;

	siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
		   (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
		   (unsigned long long)len);

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (!len) {
		rv = -EINVAL;
		goto err_out;
	}
	if (mem_limit != RLIM_INFINITY) {
		unsigned long num_pages =
			(PAGE_ALIGN(len + (start & ~PAGE_MASK))) >> PAGE_SHIFT;
		mem_limit >>= PAGE_SHIFT;

		if (num_pages > mem_limit - current->mm->locked_vm) {
			siw_dbg_pd(pd, "pages req %lu, max %lu, lock %lu\n",
				   num_pages, mem_limit,
				   current->mm->locked_vm);
			rv = -ENOMEM;
			goto err_out;
		}
	}
	umem = siw_umem_get(start, len, ib_access_writable(rights));
	if (IS_ERR(umem)) {
		rv = PTR_ERR(umem);
		siw_dbg_pd(pd, "getting user memory failed: %d\n", rv);
		umem = NULL;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, umem, start, len, rights);
	if (rv)
		goto err_out;

	if (udata) {
		struct siw_uresp_reg_mr uresp = {};
		struct siw_mem *mem = mr->mem;

		if (udata->inlen < sizeof(ureq)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
		if (rv)
			goto err_out;

		mr->base_mr.lkey |= ureq.stag_key;
		mr->base_mr.rkey |= ureq.stag_key;
		mem->stag |= ureq.stag_key;
		uresp.stag = mem->stag;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	mr->mem->stag_valid = 1;

	return &mr->base_mr;

err_out:
	atomic_dec(&sdev->num_mr);
	if (mr) {
		if (mr->mem)
			siw_mr_drop_mem(mr);
		kfree_rcu(mr, rcu);
	} else {
		if (umem)
			siw_umem_release(umem, false);
	}
	return ERR_PTR(rv);
}
struct ib_mr *siw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			   u32 max_sge)
{
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mr *mr = NULL;
	struct siw_pbl *pbl = NULL;
	int rv;

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (mr_type != IB_MR_TYPE_MEM_REG) {
		siw_dbg_pd(pd, "mr type %d unsupported\n", mr_type);
		rv = -EOPNOTSUPP;
		goto err_out;
	}
	if (max_sge > SIW_MAX_SGE_PBL) {
		siw_dbg_pd(pd, "too many sge's: %d\n", max_sge);
		rv = -ENOMEM;
		goto err_out;
	}
	pbl = siw_pbl_alloc(max_sge);
	if (IS_ERR(pbl)) {
		rv = PTR_ERR(pbl);
		siw_dbg_pd(pd, "pbl allocation failed: %d\n", rv);
		pbl = NULL;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, pbl, 0, max_sge * PAGE_SIZE, 0);
	if (rv)
		goto err_out;

	mr->mem->is_pbl = 1;

	siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);

	return &mr->base_mr;

err_out:
	atomic_dec(&sdev->num_mr);

	if (!mr) {
		kfree(pbl);
	} else {
		if (mr->mem)
			siw_mr_drop_mem(mr);
		kfree_rcu(mr, rcu);
	}
	siw_dbg_pd(pd, "failed: %d\n", rv);

	return ERR_PTR(rv);
}
/* Just used to count number of pages being mapped */
static int siw_set_pbl_page(struct ib_mr *base_mr, u64 buf_addr)
{
	return 0;
}
int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
		  unsigned int *sg_off)
{
	struct scatterlist *slp;
	struct siw_mr *mr = to_siw_mr(base_mr);
	struct siw_mem *mem = mr->mem;
	struct siw_pbl *pbl = mem->pbl;
	struct siw_pble *pble;
	unsigned long pbl_size;
	int i, rv;

	if (!pbl) {
		siw_dbg_mem(mem, "no PBL allocated\n");
		return -EINVAL;
	}
	pble = pbl->pbe;

	if (pbl->max_buf < num_sle) {
		siw_dbg_mem(mem, "too many SGE's: %d > %d\n",
			    num_sle, mem->pbl->max_buf);
		return -ENOMEM;
	}
	for_each_sg(sl, slp, num_sle, i) {
		if (sg_dma_len(slp) == 0) {
			siw_dbg_mem(mem, "empty SGE\n");
			return -EINVAL;
		}
		if (i == 0) {
			pble->addr = sg_dma_address(slp);
			pble->size = sg_dma_len(slp);
			pble->pbl_off = 0;
			pbl_size = pble->size;
			pbl->num_buf = 1;
		} else {
			/* Merge PBL entries if adjacent */
			if (pble->addr + pble->size == sg_dma_address(slp)) {
				pble->size += sg_dma_len(slp);
			} else {
				pble++;
				pbl->num_buf++;
				pble->addr = sg_dma_address(slp);
				pble->size = sg_dma_len(slp);
				pble->pbl_off = pbl_size;
			}
			pbl_size += sg_dma_len(slp);
		}
		siw_dbg_mem(mem,
			"sge[%d], size %u, addr 0x%p, total %lu\n",
			i, pble->size, ib_virt_dma_to_ptr(pble->addr),
			pbl_size);
	}
	rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
	if (rv > 0) {
		mem->len = base_mr->length;
		mem->va = base_mr->iova;
		siw_dbg_mem(mem,
			"%llu bytes, start 0x%pK, %u SLE to %u entries\n",
			mem->len, (void *)(uintptr_t)mem->va, num_sle,
			pbl->num_buf);
	}
	return rv;
}
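/* Note: physically contiguous scatterlist elements are merged into a single
 * PBL entry above, so pbl->num_buf can end up much smaller than num_sle for
 * well laid out DMA buffers.
 */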
/*
 * siw_get_dma_mr()
 *
 * Create an (empty) DMA memory region, where no umem is attached.
 */
struct ib_mr *siw_get_dma_mr(struct ib_pd *pd, int rights)
{
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mr *mr = NULL;
	int rv;

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, NULL, 0, ULONG_MAX, rights);
	if (rv)
		goto err_out;

	mr->mem->stag_valid = 1;

	siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);

	return &mr->base_mr;

err_out:
	kfree(mr);
	atomic_dec(&sdev->num_mr);

	return ERR_PTR(rv);
}
/*
 * siw_create_srq()
 *
 * Create Shared Receive Queue of attributes @init_attrs
 * within protection domain given by @pd.
 *
 * @base_srq:	Base SRQ contained in siw SRQ.
 * @init_attrs:	SRQ init attributes.
 * @udata:	points to user context
 */
int siw_create_srq(struct ib_srq *base_srq,
		   struct ib_srq_init_attr *init_attrs, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	struct ib_srq_attr *attrs = &init_attrs->attr;
	struct siw_device *sdev = to_siw_dev(base_srq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	int rv;

	if (init_attrs->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	if (atomic_inc_return(&sdev->num_srq) > SIW_MAX_SRQ) {
		siw_dbg_pd(base_srq->pd, "too many SRQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (attrs->max_wr == 0 || attrs->max_wr > SIW_MAX_SRQ_WR ||
	    attrs->max_sge > SIW_MAX_SGE || attrs->srq_limit > attrs->max_wr) {
		rv = -EINVAL;
		goto err_out;
	}
	srq->max_sge = attrs->max_sge;
	srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
	srq->limit = attrs->srq_limit;
	if (srq->limit)
		srq->armed = true;

	srq->is_kernel_res = !udata;

	if (udata)
		srq->recvq =
			vmalloc_user(srq->num_rqe * sizeof(struct siw_rqe));
	else
		srq->recvq = vzalloc(srq->num_rqe * sizeof(struct siw_rqe));

	if (srq->recvq == NULL) {
		rv = -ENOMEM;
		goto err_out;
	}
	if (udata) {
		struct siw_uresp_create_srq uresp = {};
		size_t length = srq->num_rqe * sizeof(struct siw_rqe);

		srq->srq_entry =
			siw_mmap_entry_insert(ctx, srq->recvq,
					      length, &uresp.srq_key);
		if (!srq->srq_entry) {
			rv = -ENOMEM;
			goto err_out;
		}

		uresp.num_rqe = srq->num_rqe;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	spin_lock_init(&srq->lock);

	siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");

	return 0;

err_out:
	if (srq->recvq) {
		if (ctx)
			rdma_user_mmap_entry_remove(srq->srq_entry);
		vfree(srq->recvq);
	}
	atomic_dec(&sdev->num_srq);

	return rv;
}
/*
 * siw_modify_srq()
 *
 * Modify SRQ. The caller may resize SRQ and/or set/reset notification
 * limit and (re)arm IB_EVENT_SRQ_LIMIT_REACHED notification.
 *
 * NOTE: it is unclear if RDMA core allows for changing the MAX_SGE
 * parameter. siw_modify_srq() does not check the attrs->max_sge param.
 */
int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs,
		   enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;
	int rv = 0;

	spin_lock_irqsave(&srq->lock, flags);

	if (attr_mask & IB_SRQ_MAX_WR) {
		/* resize request not yet supported */
		rv = -EOPNOTSUPP;
		goto out;
	}
	if (attr_mask & IB_SRQ_LIMIT) {
		if (attrs->srq_limit) {
			if (unlikely(attrs->srq_limit > srq->num_rqe)) {
				rv = -EINVAL;
				goto out;
			}
			srq->armed = true;
		} else {
			srq->armed = false;
		}
		srq->limit = attrs->srq_limit;
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return rv;
}
/*
 * siw_query_srq()
 *
 * Query SRQ attributes.
 */
int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;

	spin_lock_irqsave(&srq->lock, flags);

	attrs->max_wr = srq->num_rqe;
	attrs->max_sge = srq->max_sge;
	attrs->srq_limit = srq->limit;

	spin_unlock_irqrestore(&srq->lock, flags);

	return 0;
}
/*
 * siw_destroy_srq()
 *
 * Destroy SRQ.
 * It is assumed that the SRQ is not referenced by any
 * QP anymore - the code trusts the RDMA core environment to keep track
 * of QP references.
 */
int siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	struct siw_device *sdev = to_siw_dev(base_srq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);

	if (ctx)
		rdma_user_mmap_entry_remove(srq->srq_entry);
	vfree(srq->recvq);
	atomic_dec(&sdev->num_srq);

	return 0;
}
/*
 * siw_post_srq_recv()
 *
 * Post a list of receive queue elements to SRQ.
 * NOTE: The function does not check or lock a certain SRQ state
 *       during the post operation. The code simply trusts the
 *       RDMA core environment.
 *
 * @base_srq:	Base SRQ contained in siw SRQ
 * @wr:		List of R-WR's
 * @bad_wr:	Updated to failing WR if posting fails.
 */
int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;
	int rv = 0;

	if (unlikely(!srq->is_kernel_res)) {
		siw_dbg_pd(base_srq->pd,
			   "[SRQ]: no kernel post_recv for mapped srq\n");
		rv = -EINVAL;
		goto out;
	}
	/*
	 * Serialize potentially multiple producers.
	 * Also needed to serialize potentially multiple
	 * consumers.
	 */
	spin_lock_irqsave(&srq->lock, flags);

	while (wr) {
		u32 idx = srq->rq_put % srq->num_rqe;
		struct siw_rqe *rqe = &srq->recvq[idx];

		if (rqe->flags) {
			siw_dbg_pd(base_srq->pd, "SRQ full\n");
			rv = -ENOMEM;
			break;
		}
		if (unlikely(wr->num_sge > srq->max_sge)) {
			siw_dbg_pd(base_srq->pd,
				   "[SRQ]: too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		rqe->id = wr->wr_id;
		rqe->num_sge = wr->num_sge;
		siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);

		/* Make sure S-RQE is completely written before valid */
		smp_wmb();

		rqe->flags = SIW_WQE_VALID;

		srq->rq_put++;
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->lock, flags);
out:
	if (unlikely(rv < 0)) {
		siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
		*bad_wr = wr;
	}
	return rv;
}
void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_qp *base_qp = &qp->base_qp;

	/*
	 * Do not report asynchronous errors on QP which gets
	 * destroyed via verbs interface (siw_destroy_qp())
	 */
	if (qp->attrs.flags & SIW_QP_IN_DESTROY)
		return;

	event.event = etype;
	event.device = base_qp->device;
	event.element.qp = base_qp;

	if (base_qp->event_handler) {
		siw_dbg_qp(qp, "reporting event %d\n", etype);
		base_qp->event_handler(&event, base_qp->qp_context);
	}
}
void siw_cq_event(struct siw_cq *cq, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_cq *base_cq = &cq->base_cq;

	event.event = etype;
	event.device = base_cq->device;
	event.element.cq = base_cq;

	if (base_cq->event_handler) {
		siw_dbg_cq(cq, "reporting CQ event %d\n", etype);
		base_cq->event_handler(&event, base_cq->cq_context);
	}
}
void siw_srq_event(struct siw_srq *srq, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_srq *base_srq = &srq->base_srq;

	event.event = etype;
	event.device = base_srq->device;
	event.element.srq = base_srq;

	if (base_srq->event_handler) {
		siw_dbg_pd(srq->base_srq.pd,
			   "reporting SRQ event %d\n", etype);
		base_srq->event_handler(&event, base_srq->srq_context);
	}
}
void siw_port_event(struct siw_device *sdev, u32 port, enum ib_event_type etype)
{
	struct ib_event event;

	event.event = etype;
	event.device = &sdev->base_dev;
	event.element.port_num = port;

	siw_dbg(&sdev->base_dev, "reporting port event %d\n", etype);

	ib_dispatch_event(&event);
}