/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"
#include "usnic_ib_verbs.h"

#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM

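/*
 * Pack the leading bytes of the ethtool firmware version string into a
 * u64 for ib_device_attr.fw_ver. Note this reinterprets the first 8
 * bytes of the string rather than parsing it; ethtool_drvinfo's
 * fw_version buffer is 32 bytes, so the read stays in bounds.
 */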
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
	*fw_ver = *((u64 *)fw_ver_str);
}

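/*
 * Fill and copy to userspace the create_qp response: the VF index, the
 * bus address and length of BAR0, the vnic indices of the allocated
 * RQ/WQ/CQ resources, and the transport of the default flow. Userspace
 * combines this with usnic_ib_mmap() below to drive the queues directly.
 */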
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
					struct ib_udata *udata)
{
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_create_qp_resp resp;
	struct pci_dev *pdev;
	struct vnic_dev_bar *bar;
	struct usnic_vnic_res_chunk *chunk;
	struct usnic_ib_qp_grp_flow *default_flow;
	int i, err;

	memset(&resp, 0, sizeof(resp));
	us_ibdev = qp_grp->vf->pf;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (!pdev) {
		usnic_err("Failed to get pdev of qp_grp %d\n",
				qp_grp->grp_id);
		return -EFAULT;
	}

	bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
	if (!bar) {
		usnic_err("Failed to get bar0 of qp_grp %d vf %s",
				qp_grp->grp_id, pci_name(pdev));
		return -EFAULT;
	}

	resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
	resp.bar_bus_addr = bar->bus_addr;
	resp.bar_len = bar->len;
	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
	resp.rq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.rq_idx[i] = chunk->res[i]->vnic_idx;
	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
	resp.wq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.wq_idx[i] = chunk->res[i]->vnic_idx;
	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
	resp.cq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.cq_idx[i] = chunk->res[i]->vnic_idx;
	default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
	resp.transport = default_flow->trans_type;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
		return err;
	}

	return 0;
}

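/*
 * Select a VF with room for res_spec and create a qp_grp on it. When
 * usnic_ib_share_vf is set, VFs already registered with this pd are
 * tried first; otherwise, or if none has room, fall back to a VF with
 * no qp_grps at all. Must be called with usdev_lock held.
 */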
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
				struct usnic_ib_pd *pd,
				struct usnic_transport_spec *trans_spec,
				struct usnic_vnic_res_spec *res_spec)
{
	struct usnic_ib_vf *vf;
	struct usnic_vnic *vnic;
	struct usnic_ib_qp_grp *qp_grp;
	struct device *dev, **dev_list;
	int i;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	if (list_empty(&us_ibdev->vf_dev_list)) {
		usnic_info("No vfs to allocate\n");
		return NULL;
	}
	if (usnic_ib_share_vf) {
		/* Try to find resources on a used vf which is in pd */
		dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
		for (i = 0; dev_list[i]; i++) {
			dev = dev_list[i];
			vf = pci_get_drvdata(to_pci_dev(dev));
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (!usnic_vnic_check_room(vnic, res_spec)) {
				usnic_dbg("Found used vnic %s from %s\n",
						us_ibdev->ib_dev.name,
						pci_name(usnic_vnic_get_pdev(
								vnic)));
				qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev,
								vf, pd,
								res_spec,
								trans_spec);

				spin_unlock(&vf->lock);
				goto qp_grp_check;
			}
			spin_unlock(&vf->lock);
		}
		usnic_uiom_free_dev_list(dev_list);
	}
	/* Try to find resources on an unused vf */
	list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
		spin_lock(&vf->lock);
		vnic = vf->vnic;
		if (vf->qp_grp_ref_cnt == 0 &&
				usnic_vnic_check_room(vnic, res_spec) == 0) {
			qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf,
							pd, res_spec,
							trans_spec);

			spin_unlock(&vf->lock);
			goto qp_grp_check;
		}
		spin_unlock(&vf->lock);
	}

	usnic_info("No free qp grp found on %s\n", us_ibdev->ib_dev.name);
	return ERR_PTR(-ENOMEM);

qp_grp_check:
	if (IS_ERR_OR_NULL(qp_grp)) {
		usnic_err("Failed to allocate qp_grp\n");
		return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
	}
	return qp_grp;
}

static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_vf *vf = qp_grp->vf;

	WARN_ON(qp_grp->state != IB_QPS_RESET);

	spin_lock(&vf->lock);
	usnic_ib_qp_grp_destroy(qp_grp);
	spin_unlock(&vf->lock);
}

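/*
 * Advertise the Ethernet link rate as the closest IB width/speed pair
 * that covers it: 10G maps to 1X FDR10, and anything above 40G is
 * reported as 4X EDR.
 */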
static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
					u8 *active_width)
{
	if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}

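/* Reject transport types outside the range defined by the user ABI. */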
static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
{
	if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
			cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
		return -EINVAL;

	return 0;
}

/* Start of ib callback functions */
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
						u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

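/*
 * Device attributes are synthesized from the underlying netdev (fw
 * version via ethtool, GUID derived from the MAC/IP) and from the
 * per-VF resource counts scaled by the number of VFs.
 */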
int usnic_ib_query_device(struct ib_device *ibdev,
			  struct ib_device_attr *props,
			  struct ib_udata *uhw)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	union ib_gid gid;
	struct ethtool_drvinfo info;
	int qp_per_vf;

	usnic_dbg("\n");
	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	memset(props, 0, sizeof(*props));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid.raw[0]);
	memcpy(&props->sys_image_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
	props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
	props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
	props->vendor_id = PCI_VENDOR_ID_CISCO;
	props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
	props->hw_ver = us_ibdev->pdev->subsystem_device;
	qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
	props->max_qp = qp_per_vf *
		kref_read(&us_ibdev->vf_cnt);
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
		kref_read(&us_ibdev->vf_cnt);
	props->max_pd = USNIC_UIOM_MAX_PD_CNT;
	props->max_mr = USNIC_UIOM_MAX_MR_CNT;
	props->local_ca_ack_delay = 0;
	props->max_pkeys = 0;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_qp_rd_atom = 0;
	props->max_qp_init_rd_atom = 0;
	props->max_res_rd_atom = 0;
	props->max_srq = 0;
	props->max_srq_wr = 0;
	props->max_srq_sge = 0;
	props->max_fast_reg_page_list_len = 0;
	props->max_mcast_grp = 0;
	props->max_mcast_qp_attach = 0;
	props->max_total_mcast_qp_attach = 0;
	props->max_map_per_fmr = 0;
	/* Owned by Userspace
	 * max_qp_wr, max_sge, max_sge_rd, max_cqe */
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
				struct ib_port_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	struct ethtool_link_ksettings cmd;

	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	__ethtool_get_link_ksettings(us_ibdev->netdev, &cmd);
	/* props being zeroed by the caller, avoid zeroing it here */

	props->lid = 0;
	props->lmc = 1;
	props->sm_lid = 0;
	props->sm_sl = 0;

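	/*
	 * phys_state uses the IB-spec encoding: 3 = Disabled,
	 * 4 = PortConfigurationTraining, 5 = LinkUp. A port with link but
	 * no IP address is reported INIT, since the transport cannot be
	 * used until an address is assigned.
	 */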
	if (!us_ibdev->ufdev->link_up) {
		props->state = IB_PORT_DOWN;
		props->phys_state = 3;
	} else if (!us_ibdev->ufdev->inaddr) {
		props->state = IB_PORT_INIT;
		props->phys_state = 4;
	} else {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	props->port_cap_flags = 0;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	eth_speed_to_ib_speed(cmd.base.speed, &props->active_speed,
				&props->active_width);
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
	/* Userspace will adjust for hdrs */
	props->max_msg_sz = us_ibdev->ufdev->mtu;
	props->max_vl_num = 1;
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	int err;

	usnic_dbg("\n");

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	usnic_dbg("\n");
	qp_attr->qp_state = qp_grp->state;
	qp_attr->cur_qp_state = qp_grp->state;

	switch (qp_grp->ibqp.qp_type) {
	case IB_QPT_UD:
		qp_attr->qkey = 0;
		break;
	default:
		usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
		err = -EINVAL;
		goto err_out;
	}

	mutex_unlock(&vf->pf->usdev_lock);
	return 0;

err_out:
	mutex_unlock(&vf->pf->usdev_lock);
	return err;
}

int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
				union ib_gid *gid)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	usnic_dbg("\n");

	if (index > 1)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid->raw[0]);
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
				u16 *pkey)
{
	if (index > 1)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
					struct ib_ucontext *context,
					struct ib_udata *udata)
{
	struct usnic_ib_pd *pd;
	void *umem_pd;

	usnic_dbg("\n");

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
	if (IS_ERR_OR_NULL(umem_pd)) {
		kfree(pd);
		return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
	}

	usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
			pd, context, ibdev->name);
	return &pd->ibpd;
}

int usnic_ib_dealloc_pd(struct ib_pd *pd)
{
	usnic_info("freeing domain 0x%p\n", pd);

	usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
	kfree(pd);
	return 0;
}

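/*
 * Create a UD qp_grp backed by VF resources: copy and validate the user
 * command, size the resource spec (one CQ if send and recv share a CQ,
 * two otherwise), grab a VF with room, and hand the resulting resource
 * layout back to userspace via the create_qp response.
 */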
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
					struct ib_qp_init_attr *init_attr,
					struct ib_udata *udata)
{
	int err;
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_ucontext *ucontext;
	int cq_cnt;
	struct usnic_vnic_res_spec res_spec;
	struct usnic_ib_create_qp_cmd cmd;
	struct usnic_transport_spec trans_spec;

	usnic_dbg("\n");

	ucontext = to_uucontext(pd->uobject->context);
	us_ibdev = to_usdev(pd->device);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (err) {
		usnic_err("%s: cannot copy udata for create_qp\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	err = create_qp_validate_user_data(cmd);
	if (err) {
		usnic_err("%s: Failed to validate user data\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_UD) {
		usnic_err("%s asked to make a non-UD QP: %d\n",
				us_ibdev->ib_dev.name, init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	trans_spec = cmd.spec;
	mutex_lock(&us_ibdev->usdev_lock);
	cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
	res_spec = min_transport_spec[trans_spec.trans_type];
	usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
	qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
						&trans_spec, &res_spec);
	if (IS_ERR_OR_NULL(qp_grp)) {
		err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
		goto out_release_mutex;
	}

	err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
	if (err) {
		err = -EBUSY;
		goto out_release_qp_grp;
	}

	qp_grp->ctx = ucontext;
	list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
	usnic_ib_log_vf(qp_grp->vf);
	mutex_unlock(&us_ibdev->usdev_lock);
	return &qp_grp->ibqp;

out_release_qp_grp:
	qp_grp_destroy(qp_grp);
out_release_mutex:
	mutex_unlock(&us_ibdev->usdev_lock);
	return ERR_PTR(err);
}

int usnic_ib_destroy_qp(struct ib_qp *qp)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
		usnic_err("Failed to move qp grp %u to reset\n",
				qp_grp->grp_id);
	}

	list_del(&qp_grp->link);
	qp_grp_destroy(qp_grp);
	mutex_unlock(&vf->pf->usdev_lock);

	return 0;
}

int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				int attr_mask, struct ib_udata *udata)
{
	struct usnic_ib_qp_grp *qp_grp;
	int status;
	usnic_dbg("\n");

	qp_grp = to_uqp_grp(ibqp);

	mutex_lock(&qp_grp->vf->pf->usdev_lock);
	if ((attr_mask & IB_QP_PORT) && attr->port_num != 1) {
		/* usnic devices only have one port */
		status = -EINVAL;
		goto out_unlock;
	}
	if (attr_mask & IB_QP_STATE) {
		status = usnic_ib_qp_grp_modify(qp_grp, attr->qp_state, NULL);
	} else {
		usnic_err("Unhandled request, attr_mask=0x%x\n", attr_mask);
		status = -EINVAL;
	}

out_unlock:
	mutex_unlock(&qp_grp->vf->pf->usdev_lock);
	return status;
}

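/*
 * Completion handling happens in userspace, so the kernel CQ is only a
 * placeholder object; no hardware resources are attached here.
 */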
struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
				 const struct ib_cq_init_attr *attr,
				 struct ib_ucontext *context,
				 struct ib_udata *udata)
{
	struct ib_cq *cq;

	usnic_dbg("\n");
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-EBUSY);

	return cq;
}

int usnic_ib_destroy_cq(struct ib_cq *cq)
{
	usnic_dbg("\n");
	kfree(cq);
	return 0;
}

struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
					u64 virt_addr, int access_flags,
					struct ib_udata *udata)
{
	struct usnic_ib_mr *mr;
	int err;

	usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
			virt_addr, length);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
					access_flags, 0);
	if (IS_ERR_OR_NULL(mr->umem)) {
		err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
		goto err_free;
	}

	mr->ibmr.lkey = mr->ibmr.rkey = 0;
	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

int usnic_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct usnic_ib_mr *mr = to_umr(ibmr);

	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

	usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
	kfree(mr);
	return 0;
}

struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct usnic_ib_ucontext *context;
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	usnic_dbg("\n");

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&context->qp_grp_list);
	mutex_lock(&us_ibdev->usdev_lock);
	list_add_tail(&context->link, &us_ibdev->ctx_list);
	mutex_unlock(&us_ibdev->usdev_lock);

	return &context->ibucontext;
}

int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
	struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);
	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	BUG_ON(!list_empty(&context->qp_grp_list));
	list_del(&context->link);
	mutex_unlock(&us_ibdev->usdev_lock);
	kfree(context);
	return 0;
}

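/*
 * Userspace mmaps BAR0 of a VF to get at its queues. The mmap offset
 * (vm_pgoff) is not a file offset but the VF id, which is matched
 * against the VFs backing this context's qp_grps; the mapping must
 * cover the BAR exactly.
 */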
int usnic_ib_mmap(struct ib_ucontext *context,
			struct vm_area_struct *vma)
{
	struct usnic_ib_ucontext *uctx = to_ucontext(context);
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	struct vnic_dev_bar *bar;
	dma_addr_t bus_addr;
	unsigned int len;
	unsigned int vfid;

	usnic_dbg("\n");

	us_ibdev = to_usdev(context->device);
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vfid = vma->vm_pgoff;
	usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
			vma->vm_pgoff, PAGE_SHIFT, vfid);
	mutex_lock(&us_ibdev->usdev_lock);
	list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
		vf = qp_grp->vf;
		if (usnic_vnic_get_index(vf->vnic) == vfid) {
			bar = usnic_vnic_get_bar(vf->vnic, 0);
			if ((vma->vm_end - vma->vm_start) != bar->len) {
				usnic_err("Bar0 Len %lu - Request map %lu\n",
						bar->len,
						vma->vm_end - vma->vm_start);
				mutex_unlock(&us_ibdev->usdev_lock);
				return -EINVAL;
			}
			bus_addr = bar->bus_addr;
			len = bar->len;
			usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
					&bus_addr, bar->vaddr, bar->len);
			mutex_unlock(&us_ibdev->usdev_lock);

			return remap_pfn_range(vma,
						vma->vm_start,
						bus_addr >> PAGE_SHIFT,
						len, vma->vm_page_prot);
		}
	}

	mutex_unlock(&us_ibdev->usdev_lock);
	usnic_err("No VF %u found\n", vfid);
	return -EINVAL;
}

/* In ib callbacks section - Start of stub funcs */
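/*
 * The usNIC data path lives entirely in userspace on top of the mmapped
 * VF BAR, so the kernel post/poll/notify verbs below exist only to
 * satisfy the ib_device interface and always fail.
 */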
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
				 struct rdma_ah_attr *ah_attr,
				 struct ib_udata *udata)
{
	usnic_dbg("\n");
	return ERR_PTR(-EPERM);
}

int usnic_ib_destroy_ah(struct ib_ah *ah)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
				struct ib_recv_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
				struct ib_wc *wc)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_req_notify_cq(struct ib_cq *cq,
					enum ib_cq_notify_flags flags)
{
	usnic_dbg("\n");
	return -EINVAL;
}

struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	usnic_dbg("\n");
	return ERR_PTR(-ENOMEM);
}

/* In ib callbacks section - End of stub funcs */
/* End of ib callbacks section */