1 /* This file is part of the Emulex RoCE Device Driver for
2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * EMULEX and SLI are trademarks of Emulex.
7 * This software is available to you under a choice of one of two licenses.
8 * You may choose to be licensed under the terms of the GNU General Public
9 * License (GPL) Version 2, available from the file COPYING in the main
10 * directory of this source tree, or the BSD license below:
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
16 * - Redistributions of source code must retain the above copyright notice,
17 * this list of conditions and the following disclaimer.
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 * Contact Information:
36 * linux-drivers@emulex.com
40 * Costa Mesa, CA 92626
43 #include <linux/dma-mapping.h>
44 #include <rdma/ib_verbs.h>
45 #include <rdma/ib_user_verbs.h>
46 #include <rdma/iw_cm.h>
47 #include <rdma/ib_umem.h>
48 #include <rdma/ib_addr.h>
49 #include <rdma/ib_cache.h>
50 #include <rdma/uverbs_ioctl.h>
53 #include "ocrdma_hw.h"
54 #include "ocrdma_verbs.h"
55 #include <rdma/ocrdma-abi.h>
57 int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
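/* Report the device capabilities that the adapter firmware advertises. */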
66 int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
69 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
71 if (uhw->inlen || uhw->outlen)
74 memset(attr, 0, sizeof *attr);
75 memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
76 min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
77 ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
78 attr->max_mr_size = dev->attr.max_mr_size;
79 attr->page_size_cap = 0xffff000;
80 attr->vendor_id = dev->nic_info.pdev->vendor;
81 attr->vendor_part_id = dev->nic_info.pdev->device;
82 attr->hw_ver = dev->asic_id;
83 attr->max_qp = dev->attr.max_qp;
84 attr->max_ah = OCRDMA_MAX_AH;
85 attr->max_qp_wr = dev->attr.max_wqe;
87 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
88 IB_DEVICE_RC_RNR_NAK_GEN |
89 IB_DEVICE_SHUTDOWN_PORT |
90 IB_DEVICE_SYS_IMAGE_GUID |
91 IB_DEVICE_LOCAL_DMA_LKEY |
92 IB_DEVICE_MEM_MGT_EXTENSIONS;
93 attr->max_send_sge = dev->attr.max_send_sge;
94 attr->max_recv_sge = dev->attr.max_recv_sge;
95 attr->max_sge_rd = dev->attr.max_rdma_sge;
96 attr->max_cq = dev->attr.max_cq;
97 attr->max_cqe = dev->attr.max_cqe;
98 attr->max_mr = dev->attr.max_mr;
99 attr->max_mw = dev->attr.max_mw;
100 attr->max_pd = dev->attr.max_pd;
101 attr->atomic_cap = 0;
103 attr->max_map_per_fmr = 0;
104 attr->max_qp_rd_atom =
105 min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
106 attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
107 attr->max_srq = dev->attr.max_srq;
108 attr->max_srq_sge = dev->attr.max_srq_sge;
109 attr->max_srq_wr = dev->attr.max_rqe;
110 attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
111 attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
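/* Translate the PHY link speed reported by firmware into IB speed/width values. */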
116 static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
117 u8 *ib_speed, u8 *ib_width)
122 status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
124 speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
127 case OCRDMA_PHYS_LINK_SPEED_1GBPS:
128 *ib_speed = IB_SPEED_SDR;
129 *ib_width = IB_WIDTH_1X;
132 case OCRDMA_PHYS_LINK_SPEED_10GBPS:
133 *ib_speed = IB_SPEED_QDR;
134 *ib_width = IB_WIDTH_1X;
137 case OCRDMA_PHYS_LINK_SPEED_20GBPS:
138 *ib_speed = IB_SPEED_DDR;
139 *ib_width = IB_WIDTH_4X;
142 case OCRDMA_PHYS_LINK_SPEED_40GBPS:
143 *ib_speed = IB_SPEED_QDR;
144 *ib_width = IB_WIDTH_4X;
149 *ib_speed = IB_SPEED_SDR;
150 *ib_width = IB_WIDTH_1X;
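/* Port attributes are derived from the underlying netdev carrier state. */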
154 int ocrdma_query_port(struct ib_device *ibdev,
155 u8 port, struct ib_port_attr *props)
157 enum ib_port_state port_state;
158 struct ocrdma_dev *dev;
159 struct net_device *netdev;
161 /* props is zeroed by the caller; avoid zeroing it here */
162 dev = get_ocrdma_dev(ibdev);
163 netdev = dev->nic_info.netdev;
164 if (netif_running(netdev) && netif_oper_up(netdev)) {
165 port_state = IB_PORT_ACTIVE;
166 props->phys_state = 5;
168 port_state = IB_PORT_DOWN;
169 props->phys_state = 3;
171 props->max_mtu = IB_MTU_4096;
172 props->active_mtu = iboe_get_mtu(netdev->mtu);
177 props->state = port_state;
178 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
179 IB_PORT_DEVICE_MGMT_SUP |
180 IB_PORT_VENDOR_CLASS_SUP;
181 props->ip_gids = true;
182 props->gid_tbl_len = OCRDMA_MAX_SGID;
183 props->pkey_tbl_len = 1;
184 props->bad_pkey_cntr = 0;
185 props->qkey_viol_cntr = 0;
186 get_link_speed_and_width(dev, &props->active_speed,
187 &props->active_width);
188 props->max_msg_sz = 0x80000000;
189 props->max_vl_num = 4;
193 int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
194 struct ib_port_modify *props)
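/* Track the doorbell/queue regions handed to userspace so ocrdma_mmap() can validate them. */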
199 static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
202 struct ocrdma_mm *mm;
204 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
207 mm->key.phy_addr = phy_addr;
209 INIT_LIST_HEAD(&mm->entry);
211 mutex_lock(&uctx->mm_list_lock);
212 list_add_tail(&mm->entry, &uctx->mm_head);
213 mutex_unlock(&uctx->mm_list_lock);
217 static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
220 struct ocrdma_mm *mm, *tmp;
222 mutex_lock(&uctx->mm_list_lock);
223 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
224 if (len != mm->key.len && phy_addr != mm->key.phy_addr)
227 list_del(&mm->entry);
231 mutex_unlock(&uctx->mm_list_lock);
234 static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
238 struct ocrdma_mm *mm;
240 mutex_lock(&uctx->mm_list_lock);
241 list_for_each_entry(mm, &uctx->mm_head, entry) {
242 if (len != mm->key.len && phy_addr != mm->key.phy_addr)
248 mutex_unlock(&uctx->mm_list_lock);
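/* Allocate a PD id from the preallocated DPP or normal PD bitmap. */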
253 static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
255 u16 pd_bitmap_idx = 0;
256 const unsigned long *pd_bitmap;
259 pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
260 pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
261 dev->pd_mgr->max_dpp_pd);
262 __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
263 dev->pd_mgr->pd_dpp_count++;
264 if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
265 dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
267 pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
268 pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
269 dev->pd_mgr->max_normal_pd);
270 __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
271 dev->pd_mgr->pd_norm_count++;
272 if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
273 dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
275 return pd_bitmap_idx;
278 static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
284 pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
285 dev->pd_mgr->pd_norm_count;
290 pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
291 if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
294 __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
295 dev->pd_mgr->pd_dpp_count--;
298 pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
299 if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
302 __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
303 dev->pd_mgr->pd_norm_count--;
310 static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
315 mutex_lock(&dev->dev_lock);
316 status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
317 mutex_unlock(&dev->dev_lock);
321 static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
326 mutex_lock(&dev->dev_lock);
327 if (pd->dpp_enabled) {
328 /* try allocating DPP PD, if not available then normal PD */
329 if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
330 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
331 pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
332 pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
333 } else if (dev->pd_mgr->pd_norm_count <
334 dev->pd_mgr->max_normal_pd) {
335 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
336 pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
337 pd->dpp_enabled = false;
342 if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
343 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
344 pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
349 mutex_unlock(&dev->dev_lock);
356 * ocrdma_ucontext must be used here because this function is also
357 * called from ocrdma_alloc_ucontext where ib_udata does not have
358 * valid ib_ucontext pointer. ib_uverbs_get_context does not call
359 * uobj_{alloc|get_xxx} helpers which are used to store the
360 * ib_ucontext in uverbs_attr_bundle wrapping the ib_udata. so
361 * ib_udata does NOT imply valid ib_ucontext here!
363 static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
364 struct ocrdma_ucontext *uctx,
365 struct ib_udata *udata)
369 if (udata && uctx && dev->attr.max_dpp_pds) {
371 ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
373 pd->dpp_enabled ? (dev->nic_info.db_page_size /
374 dev->attr.wqe_size) : 0;
377 if (dev->pd_mgr->pd_prealloc_valid)
378 return ocrdma_get_pd_num(dev, pd);
381 status = ocrdma_mbx_alloc_pd(dev, pd);
383 if (pd->dpp_enabled) {
384 pd->dpp_enabled = false;
394 static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
395 struct ocrdma_pd *pd)
397 return (uctx->cntxt_pd == pd);
400 static void _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
401 struct ocrdma_pd *pd)
403 if (dev->pd_mgr->pd_prealloc_valid)
404 ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
406 ocrdma_mbx_dealloc_pd(dev, pd);
409 static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
410 struct ocrdma_ucontext *uctx,
411 struct ib_udata *udata)
413 struct ib_device *ibdev = &dev->ibdev;
417 pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
422 uctx->cntxt_pd = get_ocrdma_pd(pd);
424 status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata);
426 kfree(uctx->cntxt_pd);
430 uctx->cntxt_pd->uctx = uctx;
431 uctx->cntxt_pd->ibpd.device = &dev->ibdev;
436 static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
438 struct ocrdma_pd *pd = uctx->cntxt_pd;
439 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
441 if (uctx->pd_in_use) {
442 pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
443 __func__, dev->id, pd->id);
445 kfree(uctx->cntxt_pd);
446 uctx->cntxt_pd = NULL;
447 _ocrdma_dealloc_pd(dev, pd);
450 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
452 struct ocrdma_pd *pd = NULL;
454 mutex_lock(&uctx->mm_list_lock);
455 if (!uctx->pd_in_use) {
456 uctx->pd_in_use = true;
459 mutex_unlock(&uctx->mm_list_lock);
464 static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
466 mutex_lock(&uctx->mm_list_lock);
467 uctx->pd_in_use = false;
468 mutex_unlock(&uctx->mm_list_lock);
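/* Set up a user context: allocate the DMA-able AH table, register its mmap entry and create the per-context PD. */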
471 int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
473 struct ib_device *ibdev = uctx->device;
475 struct ocrdma_ucontext *ctx = get_ocrdma_ucontext(uctx);
476 struct ocrdma_alloc_ucontext_resp resp = {};
477 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
478 struct pci_dev *pdev = dev->nic_info.pdev;
479 u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
483 INIT_LIST_HEAD(&ctx->mm_head);
484 mutex_init(&ctx->mm_list_lock);
486 ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
487 &ctx->ah_tbl.pa, GFP_KERNEL);
491 ctx->ah_tbl.len = map_len;
493 resp.ah_tbl_len = ctx->ah_tbl.len;
494 resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);
496 status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
500 status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
504 resp.dev_id = dev->id;
505 resp.max_inline_data = dev->attr.max_inline_data;
506 resp.wqe_size = dev->attr.wqe_size;
507 resp.rqe_size = dev->attr.rqe_size;
508 resp.dpp_wqe_size = dev->attr.wqe_size;
510 memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
511 status = ib_copy_to_udata(udata, &resp, sizeof(resp));
517 ocrdma_dealloc_ucontext_pd(ctx);
519 ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
521 dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
526 void ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
528 struct ocrdma_mm *mm, *tmp;
529 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
530 struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
531 struct pci_dev *pdev = dev->nic_info.pdev;
533 ocrdma_dealloc_ucontext_pd(uctx);
535 ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
536 dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
539 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
540 list_del(&mm->entry);
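/* Map doorbell, DPP or queue memory into userspace; the offset must match a region previously registered via ocrdma_add_mmap(). */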
545 int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
547 struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
548 struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
549 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
550 u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
551 unsigned long len = (vma->vm_end - vma->vm_start);
555 if (vma->vm_start & (PAGE_SIZE - 1))
557 found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
561 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
562 dev->nic_info.db_total_size)) &&
563 (len <= dev->nic_info.db_page_size)) {
564 if (vma->vm_flags & VM_READ)
567 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
568 status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
569 len, vma->vm_page_prot);
570 } else if (dev->nic_info.dpp_unmapped_len &&
571 (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
572 (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
573 dev->nic_info.dpp_unmapped_len)) &&
574 (len <= dev->nic_info.dpp_unmapped_len)) {
575 if (vma->vm_flags & VM_READ)
578 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
579 status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
580 len, vma->vm_page_prot);
582 status = remap_pfn_range(vma, vma->vm_start,
583 vma->vm_pgoff, len, vma->vm_page_prot);
588 static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
589 struct ib_udata *udata)
593 u64 dpp_page_addr = 0;
595 struct ocrdma_alloc_pd_uresp rsp;
596 struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
597 udata, struct ocrdma_ucontext, ibucontext);
599 memset(&rsp, 0, sizeof(rsp));
601 rsp.dpp_enabled = pd->dpp_enabled;
602 db_page_addr = ocrdma_get_db_addr(dev, pd->id);
603 db_page_size = dev->nic_info.db_page_size;
605 status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
609 if (pd->dpp_enabled) {
610 dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
611 (pd->id * PAGE_SIZE);
612 status = ocrdma_add_mmap(uctx, dpp_page_addr,
616 rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
617 rsp.dpp_page_addr_lo = dpp_page_addr;
620 status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
629 ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
631 ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
635 int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
637 struct ib_device *ibdev = ibpd->device;
638 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
639 struct ocrdma_pd *pd;
641 u8 is_uctx_pd = false;
642 struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
643 udata, struct ocrdma_ucontext, ibucontext);
646 pd = ocrdma_get_ucontext_pd(uctx);
653 pd = get_ocrdma_pd(ibpd);
654 status = _ocrdma_alloc_pd(dev, pd, uctx, udata);
660 status = ocrdma_copy_pd_uresp(dev, pd, udata);
668 ocrdma_release_ucontext_pd(uctx);
670 _ocrdma_dealloc_pd(dev, pd);
675 void ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
677 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
678 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
679 struct ocrdma_ucontext *uctx = NULL;
684 u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
685 (pd->id * PAGE_SIZE);
687 ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
688 usr_db = ocrdma_get_db_addr(dev, pd->id);
689 ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
691 if (is_ucontext_pd(uctx, pd)) {
692 ocrdma_release_ucontext_pd(uctx);
696 _ocrdma_dealloc_pd(dev, pd);
699 static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
700 u32 pdid, int acc, u32 num_pbls, u32 addr_check)
705 mr->hwmr.local_rd = 1;
706 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
707 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
708 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
709 mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
710 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
711 mr->hwmr.num_pbls = num_pbls;
713 status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
717 mr->ibmr.lkey = mr->hwmr.lkey;
718 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
719 mr->ibmr.rkey = mr->hwmr.lkey;
723 struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
726 struct ocrdma_mr *mr;
727 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
728 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
730 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
731 pr_err("%s err, invalid access rights\n", __func__);
732 return ERR_PTR(-EINVAL);
735 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
737 return ERR_PTR(-ENOMEM);
739 status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
740 OCRDMA_ADDR_CHECK_DISABLE);
743 return ERR_PTR(status);
749 static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
750 struct ocrdma_hw_mr *mr)
752 struct pci_dev *pdev = dev->nic_info.pdev;
756 for (i = 0; i < mr->num_pbls; i++) {
757 if (!mr->pbl_table[i].va)
759 dma_free_coherent(&pdev->dev, mr->pbl_size,
761 mr->pbl_table[i].pa);
763 kfree(mr->pbl_table);
764 mr->pbl_table = NULL;
768 static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
777 pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
778 if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
782 num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
783 num_pbls = num_pbls / (pbl_size / sizeof(u64));
785 } while (num_pbls >= dev->attr.max_num_mr_pbl);
787 mr->hwmr.num_pbes = num_pbes;
788 mr->hwmr.num_pbls = num_pbls;
789 mr->hwmr.pbl_size = pbl_size;
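/* Allocate the DMA-coherent PBLs that will hold the page entries (PBEs) of an MR. */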
793 static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
797 u32 dma_len = mr->pbl_size;
798 struct pci_dev *pdev = dev->nic_info.pdev;
802 mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
808 for (i = 0; i < mr->num_pbls; i++) {
809 va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
811 ocrdma_free_mr_pbl_tbl(dev, mr);
815 mr->pbl_table[i].va = va;
816 mr->pbl_table[i].pa = pa;
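/* Walk the umem scatterlist and write each DMA page address into the PBEs. */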
821 static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
824 struct ocrdma_pbe *pbe;
825 struct sg_dma_page_iter sg_iter;
826 struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
827 struct ib_umem *umem = mr->umem;
828 int pbe_cnt, total_num_pbes = 0;
831 if (!mr->hwmr.num_pbes)
834 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
837 for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
838 /* store the page address in pbe */
839 pg_addr = sg_page_iter_dma_address(&sg_iter);
840 pbe->pa_lo = cpu_to_le32(pg_addr);
841 pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
846 /* if done building pbes, issue the mbx cmd. */
847 if (total_num_pbes == num_pbes)
850 /* if the given pbl is full storing the pbes, move to the next pbl. */
853 if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
855 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
861 struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
862 u64 usr_addr, int acc, struct ib_udata *udata)
864 int status = -ENOMEM;
865 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
866 struct ocrdma_mr *mr;
867 struct ocrdma_pd *pd;
870 pd = get_ocrdma_pd(ibpd);
872 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
873 return ERR_PTR(-EINVAL);
875 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
877 return ERR_PTR(status);
878 mr->umem = ib_umem_get(udata, start, len, acc, 0);
879 if (IS_ERR(mr->umem)) {
883 num_pbes = ib_umem_page_count(mr->umem);
884 status = ocrdma_get_pbl_info(dev, mr, num_pbes);
888 mr->hwmr.pbe_size = PAGE_SIZE;
889 mr->hwmr.fbo = ib_umem_offset(mr->umem);
890 mr->hwmr.va = usr_addr;
892 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
893 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
894 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
895 mr->hwmr.local_rd = 1;
896 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
897 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
900 build_user_pbes(dev, mr, num_pbes);
901 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
904 mr->ibmr.lkey = mr->hwmr.lkey;
905 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
906 mr->ibmr.rkey = mr->hwmr.lkey;
911 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
914 return ERR_PTR(status);
917 int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
919 struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
920 struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
922 (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
925 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
927 /* it could be user registered memory. */
928 ib_umem_release(mr->umem);
931 /* Don't stop cleanup, in case FW is unresponsive */
932 if (dev->mqe_ctx.fw_error_state) {
933 pr_err("%s(%d) fw not responding.\n",
939 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
940 struct ib_udata *udata)
943 struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
944 udata, struct ocrdma_ucontext, ibucontext);
945 struct ocrdma_create_cq_uresp uresp;
947 /* this must be user flow! */
951 memset(&uresp, 0, sizeof(uresp));
952 uresp.cq_id = cq->id;
953 uresp.page_size = PAGE_ALIGN(cq->len);
955 uresp.max_hw_cqe = cq->max_hw_cqe;
956 uresp.page_addr[0] = virt_to_phys(cq->va);
957 uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
958 uresp.db_page_size = dev->nic_info.db_page_size;
959 uresp.phase_change = cq->phase_change ? 1 : 0;
960 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
962 pr_err("%s(%d) copy error cqid=0x%x.\n",
963 __func__, dev->id, cq->id);
966 status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
969 status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
971 ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
979 int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
980 struct ib_udata *udata)
982 struct ib_device *ibdev = ibcq->device;
983 int entries = attr->cqe;
984 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
985 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
986 struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
987 udata, struct ocrdma_ucontext, ibucontext);
990 struct ocrdma_create_cq_ureq ureq;
996 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1001 spin_lock_init(&cq->cq_lock);
1002 spin_lock_init(&cq->comp_handler_lock);
1003 INIT_LIST_HEAD(&cq->sq_head);
1004 INIT_LIST_HEAD(&cq->rq_head);
1007 pd_id = uctx->cntxt_pd->id;
1009 status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
1014 status = ocrdma_copy_cq_uresp(dev, cq, udata);
1018 cq->phase = OCRDMA_CQE_VALID;
1019 dev->cq_tbl[cq->id] = cq;
1023 ocrdma_mbx_destroy_cq(dev, cq);
1027 int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
1028 struct ib_udata *udata)
1031 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
1033 if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
1037 ibcq->cqe = new_cnt;
1041 static void ocrdma_flush_cq(struct ocrdma_cq *cq)
1044 int valid_count = 0;
1045 unsigned long flags;
1047 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
1048 struct ocrdma_cqe *cqe = NULL;
1051 cqe_cnt = cq->cqe_cnt;
1053 /* The last irq might have scheduled a polling thread;
1054 * sync up with it before hard flushing. */
1056 spin_lock_irqsave(&cq->cq_lock, flags);
1058 if (is_cqe_valid(cq, cqe))
1063 ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
1064 spin_unlock_irqrestore(&cq->cq_lock, flags);
1067 void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1069 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
1070 struct ocrdma_eq *eq = NULL;
1071 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
1075 dev->cq_tbl[cq->id] = NULL;
1076 indx = ocrdma_get_eq_table_index(dev, cq->eqn);
1078 eq = &dev->eq_tbl[indx];
1079 irq = ocrdma_get_irq(dev, eq);
1080 synchronize_irq(irq);
1081 ocrdma_flush_cq(cq);
1083 ocrdma_mbx_destroy_cq(dev, cq);
1085 pdid = cq->ucontext->cntxt_pd->id;
1086 ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
1087 PAGE_ALIGN(cq->len));
1088 ocrdma_del_mmap(cq->ucontext,
1089 ocrdma_get_db_addr(dev, pdid),
1090 dev->nic_info.db_page_size);
1094 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
1096 int status = -EINVAL;
1098 if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
1099 dev->qp_tbl[qp->id] = qp;
1105 static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
1107 dev->qp_tbl[qp->id] = NULL;
1110 static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
1111 struct ib_qp_init_attr *attrs,
1112 struct ib_udata *udata)
1114 if ((attrs->qp_type != IB_QPT_GSI) &&
1115 (attrs->qp_type != IB_QPT_RC) &&
1116 (attrs->qp_type != IB_QPT_UC) &&
1117 (attrs->qp_type != IB_QPT_UD)) {
1118 pr_err("%s(%d) unsupported qp type=0x%x requested\n",
1119 __func__, dev->id, attrs->qp_type);
1122 /* Skip the check for QP1 to support CM size of 128 */
1123 if ((attrs->qp_type != IB_QPT_GSI) &&
1124 (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
1125 pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
1126 __func__, dev->id, attrs->cap.max_send_wr);
1127 pr_err("%s(%d) supported send_wr=0x%x\n",
1128 __func__, dev->id, dev->attr.max_wqe);
1131 if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
1132 pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
1133 __func__, dev->id, attrs->cap.max_recv_wr);
1134 pr_err("%s(%d) supported recv_wr=0x%x\n",
1135 __func__, dev->id, dev->attr.max_rqe);
1138 if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
1139 pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
1140 __func__, dev->id, attrs->cap.max_inline_data);
1141 pr_err("%s(%d) supported inline data size=0x%x\n",
1142 __func__, dev->id, dev->attr.max_inline_data);
1145 if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
1146 pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
1147 __func__, dev->id, attrs->cap.max_send_sge);
1148 pr_err("%s(%d) supported send_sge=0x%x\n",
1149 __func__, dev->id, dev->attr.max_send_sge);
1152 if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
1153 pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
1154 __func__, dev->id, attrs->cap.max_recv_sge);
1155 pr_err("%s(%d) supported recv_sge=0x%x\n",
1156 __func__, dev->id, dev->attr.max_recv_sge);
1159 /* unprivileged user space cannot create special QP */
1160 if (udata && attrs->qp_type == IB_QPT_GSI) {
1162 ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
1163 __func__, dev->id, attrs->qp_type);
1166 /* allow creating only one GSI type of QP */
1167 if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
1168 pr_err("%s(%d) GSI special QPs already created.\n",
1172 /* verify consumer QPs are not trying to use GSI QP's CQ */
1173 if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
1174 if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
1175 (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
1176 pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
1184 static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
1185 struct ib_udata *udata, int dpp_offset,
1186 int dpp_credit_lmt, int srq)
1190 struct ocrdma_create_qp_uresp uresp;
1191 struct ocrdma_pd *pd = qp->pd;
1192 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
1194 memset(&uresp, 0, sizeof(uresp));
1195 usr_db = dev->nic_info.unmapped_db +
1196 (pd->id * dev->nic_info.db_page_size);
1197 uresp.qp_id = qp->id;
1198 uresp.sq_dbid = qp->sq.dbid;
1199 uresp.num_sq_pages = 1;
1200 uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
1201 uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
1202 uresp.num_wqe_allocated = qp->sq.max_cnt;
1204 uresp.rq_dbid = qp->rq.dbid;
1205 uresp.num_rq_pages = 1;
1206 uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
1207 uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
1208 uresp.num_rqe_allocated = qp->rq.max_cnt;
1210 uresp.db_page_addr = usr_db;
1211 uresp.db_page_size = dev->nic_info.db_page_size;
1212 uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
1213 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1214 uresp.db_shift = OCRDMA_DB_RQ_SHIFT;
1216 if (qp->dpp_enabled) {
1217 uresp.dpp_credit = dpp_credit_lmt;
1218 uresp.dpp_offset = dpp_offset;
1220 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1222 pr_err("%s(%d) user copy error.\n", __func__, dev->id);
1225 status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
1226 uresp.sq_page_size);
1231 status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
1232 uresp.rq_page_size);
1238 ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
1243 static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
1244 struct ocrdma_pd *pd)
1246 if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1247 qp->sq_db = dev->nic_info.db +
1248 (pd->id * dev->nic_info.db_page_size) +
1249 OCRDMA_DB_GEN2_SQ_OFFSET;
1250 qp->rq_db = dev->nic_info.db +
1251 (pd->id * dev->nic_info.db_page_size) +
1252 OCRDMA_DB_GEN2_RQ_OFFSET;
1254 qp->sq_db = dev->nic_info.db +
1255 (pd->id * dev->nic_info.db_page_size) +
1256 OCRDMA_DB_SQ_OFFSET;
1257 qp->rq_db = dev->nic_info.db +
1258 (pd->id * dev->nic_info.db_page_size) +
1259 OCRDMA_DB_RQ_OFFSET;
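/* wr_id shadow tables are only needed for kernel QPs; user QPs track wr_ids in the library. */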
1263 static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
1266 kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
1268 if (qp->wqe_wr_id_tbl == NULL)
1271 kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
1272 if (qp->rqe_wr_id_tbl == NULL)
1278 static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
1279 struct ocrdma_pd *pd,
1280 struct ib_qp_init_attr *attrs)
1283 spin_lock_init(&qp->q_lock);
1284 INIT_LIST_HEAD(&qp->sq_entry);
1285 INIT_LIST_HEAD(&qp->rq_entry);
1287 qp->qp_type = attrs->qp_type;
1288 qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
1289 qp->max_inline_data = attrs->cap.max_inline_data;
1290 qp->sq.max_sges = attrs->cap.max_send_sge;
1291 qp->rq.max_sges = attrs->cap.max_recv_sge;
1292 qp->state = OCRDMA_QPS_RST;
1293 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1296 static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
1297 struct ib_qp_init_attr *attrs)
1299 if (attrs->qp_type == IB_QPT_GSI) {
1300 dev->gsi_qp_created = 1;
1301 dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
1302 dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
1306 struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1307 struct ib_qp_init_attr *attrs,
1308 struct ib_udata *udata)
1311 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1312 struct ocrdma_qp *qp;
1313 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1314 struct ocrdma_create_qp_ureq ureq;
1315 u16 dpp_credit_lmt, dpp_offset;
1317 status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
1321 memset(&ureq, 0, sizeof(ureq));
1323 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1324 return ERR_PTR(-EFAULT);
1326 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1331 ocrdma_set_qp_init_params(qp, pd, attrs);
1333 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
1334 OCRDMA_QP_FAST_REG);
1336 mutex_lock(&dev->dev_lock);
1337 status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
1339 &dpp_offset, &dpp_credit_lmt);
1343 /* user-space QPs' wr_id tables are managed by the library */
1344 if (udata == NULL) {
1345 status = ocrdma_alloc_wr_id_tbl(qp);
1350 status = ocrdma_add_qpn_map(dev, qp);
1353 ocrdma_set_qp_db(dev, qp, pd);
1355 status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
1357 (attrs->srq != NULL));
1361 ocrdma_store_gsi_qp_cq(dev, attrs);
1362 qp->ibqp.qp_num = qp->id;
1363 mutex_unlock(&dev->dev_lock);
1367 ocrdma_del_qpn_map(dev, qp);
1369 ocrdma_mbx_destroy_qp(dev, qp);
1371 mutex_unlock(&dev->dev_lock);
1372 kfree(qp->wqe_wr_id_tbl);
1373 kfree(qp->rqe_wr_id_tbl);
1375 pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
1377 return ERR_PTR(status);
1380 int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1384 struct ocrdma_qp *qp;
1385 struct ocrdma_dev *dev;
1386 enum ib_qp_state old_qps;
1388 qp = get_ocrdma_qp(ibqp);
1389 dev = get_ocrdma_dev(ibqp->device);
1390 if (attr_mask & IB_QP_STATE)
1391 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
1392 /* if new and previous states are same hw doesn't need to
1397 return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
1400 int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1401 int attr_mask, struct ib_udata *udata)
1403 unsigned long flags;
1404 int status = -EINVAL;
1405 struct ocrdma_qp *qp;
1406 struct ocrdma_dev *dev;
1407 enum ib_qp_state old_qps, new_qps;
1409 qp = get_ocrdma_qp(ibqp);
1410 dev = get_ocrdma_dev(ibqp->device);
1412 /* synchronize with multiple contexts trying to change or retrieve the qp state */
1413 mutex_lock(&dev->dev_lock);
1414 /* synchronize with wqe, rqe posting and cqe processing contexts */
1415 spin_lock_irqsave(&qp->q_lock, flags);
1416 old_qps = get_ibqp_state(qp->state);
1417 if (attr_mask & IB_QP_STATE)
1418 new_qps = attr->qp_state;
1421 spin_unlock_irqrestore(&qp->q_lock, flags);
1423 if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
1424 pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
1425 "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1426 __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
1431 status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
1435 mutex_unlock(&dev->dev_lock);
1439 static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1457 static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1459 int ib_qp_acc_flags = 0;
1461 if (qp_cap_flags & OCRDMA_QP_INB_WR)
1462 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1463 if (qp_cap_flags & OCRDMA_QP_INB_RD)
1464 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1465 return ib_qp_acc_flags;
1468 int ocrdma_query_qp(struct ib_qp *ibqp,
1469 struct ib_qp_attr *qp_attr,
1470 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1474 struct ocrdma_qp_params params;
1475 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1476 struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
1478 memset(&params, 0, sizeof(params));
1479 mutex_lock(&dev->dev_lock);
1480 status = ocrdma_mbx_query_qp(dev, qp, &params);
1481 mutex_unlock(&dev->dev_lock);
1484 if (qp->qp_type == IB_QPT_UD)
1485 qp_attr->qkey = params.qkey;
1487 ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
1488 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1489 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
1490 qp_attr->path_mig_state = IB_MIG_MIGRATED;
1491 qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1492 qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1493 qp_attr->dest_qp_num =
1494 params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1496 qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1497 qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1498 qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1499 qp_attr->cap.max_send_sge = qp->sq.max_sges;
1500 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1501 qp_attr->cap.max_inline_data = qp->max_inline_data;
1502 qp_init_attr->cap = qp_attr->cap;
1503 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1505 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
1506 params.rnt_rc_sl_fl &
1507 OCRDMA_QP_PARAMS_FLOW_LABEL_MASK,
1509 (params.hop_lmt_rq_psn &
1510 OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1511 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT,
1512 (params.tclass_sq_psn &
1513 OCRDMA_QP_PARAMS_TCLASS_MASK) >>
1514 OCRDMA_QP_PARAMS_TCLASS_SHIFT);
1515 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, ¶ms.dgid[0]);
1517 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
1518 rdma_ah_set_sl(&qp_attr->ah_attr, (params.rnt_rc_sl_fl &
1519 OCRDMA_QP_PARAMS_SL_MASK) >>
1520 OCRDMA_QP_PARAMS_SL_SHIFT);
1521 qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1522 OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1523 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1524 qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1525 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1526 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1527 qp_attr->retry_cnt =
1528 (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1529 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1530 qp_attr->min_rnr_timer = 0;
1531 qp_attr->pkey_index = 0;
1532 qp_attr->port_num = 1;
1533 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
1534 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
1535 qp_attr->alt_pkey_index = 0;
1536 qp_attr->alt_port_num = 0;
1537 qp_attr->alt_timeout = 0;
1538 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1539 qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1540 OCRDMA_QP_PARAMS_STATE_SHIFT;
1541 qp_attr->qp_state = get_ibqp_state(qp_state);
1542 qp_attr->cur_qp_state = qp_attr->qp_state;
1543 qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1544 qp_attr->max_dest_rd_atomic =
1545 params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1546 qp_attr->max_rd_atomic =
1547 params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1548 qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1549 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1550 /* Sync driver QP state with FW */
1551 ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
1556 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
1558 unsigned int i = idx / 32;
1559 u32 mask = (1U << (idx % 32));
1561 srq->idx_bit_fields[i] ^= mask;
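/* Helpers for the circular hardware work-queue head/tail indices. */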
1564 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1566 return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
1569 static int is_hw_sq_empty(struct ocrdma_qp *qp)
1571 return (qp->sq.tail == qp->sq.head);
1574 static int is_hw_rq_empty(struct ocrdma_qp *qp)
1576 return (qp->rq.tail == qp->rq.head);
1579 static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1581 return q->va + (q->head * q->entry_size);
1584 static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1587 return q->va + (idx * q->entry_size);
1590 static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1592 q->head = (q->head + 1) & q->max_wqe_idx;
1595 static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1597 q->tail = (q->tail + 1) & q->max_wqe_idx;
1600 /* discard the cqe for a given QP */
1601 static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1603 unsigned long cq_flags;
1604 unsigned long flags;
1605 int discard_cnt = 0;
1606 u32 cur_getp, stop_getp;
1607 struct ocrdma_cqe *cqe;
1608 u32 qpn = 0, wqe_idx = 0;
1610 spin_lock_irqsave(&cq->cq_lock, cq_flags);
1612 /* traverse through the CQEs in the hw CQ,
1613 * find the matching CQE for a given qp,
1614 * mark the matching one discarded by clearing qpn.
1615 * ring the doorbell in the poll_cq() as
1616 * we don't complete out of order cqe. */
1619 cur_getp = cq->getp;
1620 /* find up to where we should reap the cq. */
1621 stop_getp = cur_getp;
1623 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1626 cqe = cq->va + cur_getp;
1627 /* if (a) done reaping whole hw cq, or
1628 * (b) qp_xq becomes empty. */
1631 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1632 /* if previously discarded cqe found, skip that too. */
1633 /* check for matching qp */
1634 if (qpn == 0 || qpn != qp->id)
1637 if (is_cqe_for_sq(cqe)) {
1638 ocrdma_hwq_inc_tail(&qp->sq);
1641 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
1642 OCRDMA_CQE_BUFTAG_SHIFT) &
1643 qp->srq->rq.max_wqe_idx;
1644 BUG_ON(wqe_idx < 1);
1645 spin_lock_irqsave(&qp->srq->q_lock, flags);
1646 ocrdma_hwq_inc_tail(&qp->srq->rq);
1647 ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
1648 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1651 ocrdma_hwq_inc_tail(&qp->rq);
1654 /* mark cqe discarded so that it is not picked up later in the poll_cq(). */
1660 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1661 } while (cur_getp != stop_getp);
1662 spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1665 void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1668 unsigned long flags;
1669 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1670 /* sync with any active CQ poll */
1672 spin_lock_irqsave(&dev->flush_q_lock, flags);
1673 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1675 list_del(&qp->sq_entry);
1677 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1679 list_del(&qp->rq_entry);
1681 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1684 int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1686 struct ocrdma_pd *pd;
1687 struct ocrdma_qp *qp;
1688 struct ocrdma_dev *dev;
1689 struct ib_qp_attr attrs;
1691 unsigned long flags;
1693 qp = get_ocrdma_qp(ibqp);
1694 dev = get_ocrdma_dev(ibqp->device);
1698 /* change the QP state to ERROR */
1699 if (qp->state != OCRDMA_QPS_RST) {
1700 attrs.qp_state = IB_QPS_ERR;
1701 attr_mask = IB_QP_STATE;
1702 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1704 /* ensure that CQEs for a newly created QP (whose id may be the same as
1705 * one that is just being destroyed) don't get discarded until the old
1706 * QP's CQEs are discarded. */
1708 mutex_lock(&dev->dev_lock);
1709 (void) ocrdma_mbx_destroy_qp(dev, qp);
1712 * acquire CQ lock while destroy is in progress, in order to
1713 * protect against processing in-flight CQEs for this QP.
1715 spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
1716 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) {
1717 spin_lock(&qp->rq_cq->cq_lock);
1718 ocrdma_del_qpn_map(dev, qp);
1719 spin_unlock(&qp->rq_cq->cq_lock);
1721 ocrdma_del_qpn_map(dev, qp);
1723 spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1726 ocrdma_discard_cqes(qp, qp->sq_cq);
1727 ocrdma_discard_cqes(qp, qp->rq_cq);
1729 mutex_unlock(&dev->dev_lock);
1732 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1733 PAGE_ALIGN(qp->sq.len));
1735 ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1736 PAGE_ALIGN(qp->rq.len));
1739 ocrdma_del_flush_qp(qp);
1741 kfree(qp->wqe_wr_id_tbl);
1742 kfree(qp->rqe_wr_id_tbl);
1747 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1748 struct ib_udata *udata)
1751 struct ocrdma_create_srq_uresp uresp;
1753 memset(&uresp, 0, sizeof(uresp));
1754 uresp.rq_dbid = srq->rq.dbid;
1755 uresp.num_rq_pages = 1;
1756 uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
1757 uresp.rq_page_size = srq->rq.len;
1758 uresp.db_page_addr = dev->nic_info.unmapped_db +
1759 (srq->pd->id * dev->nic_info.db_page_size);
1760 uresp.db_page_size = dev->nic_info.db_page_size;
1761 uresp.num_rqe_allocated = srq->rq.max_cnt;
1762 if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1763 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1764 uresp.db_shift = 24;
1766 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1767 uresp.db_shift = 16;
1770 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1773 status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1774 uresp.rq_page_size);
1780 int ocrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
1781 struct ib_udata *udata)
1784 struct ocrdma_pd *pd = get_ocrdma_pd(ibsrq->pd);
1785 struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1786 struct ocrdma_srq *srq = get_ocrdma_srq(ibsrq);
1788 if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1790 if (init_attr->attr.max_wr > dev->attr.max_rqe)
1793 spin_lock_init(&srq->q_lock);
1795 srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
1796 status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1801 srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
1803 if (!srq->rqe_wr_id_tbl) {
1808 srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1809 (srq->rq.max_cnt % 32 ? 1 : 0);
1810 srq->idx_bit_fields =
1811 kmalloc_array(srq->bit_fields_len, sizeof(u32),
1813 if (!srq->idx_bit_fields) {
1817 memset(srq->idx_bit_fields, 0xff,
1818 srq->bit_fields_len * sizeof(u32));
1821 if (init_attr->attr.srq_limit) {
1822 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1828 status = ocrdma_copy_srq_uresp(dev, srq, udata);
1836 ocrdma_mbx_destroy_srq(dev, srq);
1837 kfree(srq->rqe_wr_id_tbl);
1838 kfree(srq->idx_bit_fields);
1842 int ocrdma_modify_srq(struct ib_srq *ibsrq,
1843 struct ib_srq_attr *srq_attr,
1844 enum ib_srq_attr_mask srq_attr_mask,
1845 struct ib_udata *udata)
1848 struct ocrdma_srq *srq;
1850 srq = get_ocrdma_srq(ibsrq);
1851 if (srq_attr_mask & IB_SRQ_MAX_WR)
1854 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1858 int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1861 struct ocrdma_srq *srq;
1863 srq = get_ocrdma_srq(ibsrq);
1864 status = ocrdma_mbx_query_srq(srq, srq_attr);
1868 void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1870 struct ocrdma_srq *srq;
1871 struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1873 srq = get_ocrdma_srq(ibsrq);
1875 ocrdma_mbx_destroy_srq(dev, srq);
1878 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1879 PAGE_ALIGN(srq->rq.len));
1881 kfree(srq->idx_bit_fields);
1882 kfree(srq->rqe_wr_id_tbl);
1885 /* unprivileged verbs and their support functions. */
1886 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1887 struct ocrdma_hdr_wqe *hdr,
1888 const struct ib_send_wr *wr)
1890 struct ocrdma_ewqe_ud_hdr *ud_hdr =
1891 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1892 struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);
1894 ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
1895 if (qp->qp_type == IB_QPT_GSI)
1896 ud_hdr->qkey = qp->qkey;
1898 ud_hdr->qkey = ud_wr(wr)->remote_qkey;
1899 ud_hdr->rsvd_ahid = ah->id;
1900 ud_hdr->hdr_type = ah->hdr_type;
1901 if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
1902 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
1905 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1906 struct ocrdma_sge *sge, int num_sge,
1907 struct ib_sge *sg_list)
1911 for (i = 0; i < num_sge; i++) {
1912 sge[i].lrkey = sg_list[i].lkey;
1913 sge[i].addr_lo = sg_list[i].addr;
1914 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1915 sge[i].len = sg_list[i].length;
1916 hdr->total_len += sg_list[i].length;
1919 memset(sge, 0, sizeof(*sge));
1922 static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
1924 uint32_t total_len = 0, i;
1926 for (i = 0; i < num_sge; i++)
1927 total_len += sg_list[i].length;
1932 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1933 struct ocrdma_hdr_wqe *hdr,
1934 struct ocrdma_sge *sge,
1935 const struct ib_send_wr *wr, u32 wqe_size)
1940 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
1941 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
1942 if (unlikely(hdr->total_len > qp->max_inline_data)) {
1943 pr_err("%s() supported_len=0x%x,\n"
1944 " unsupported len req=0x%x\n", __func__,
1945 qp->max_inline_data, hdr->total_len);
1948 dpp_addr = (char *)sge;
1949 for (i = 0; i < wr->num_sge; i++) {
1951 (void *)(unsigned long)wr->sg_list[i].addr,
1952 wr->sg_list[i].length);
1953 dpp_addr += wr->sg_list[i].length;
1956 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
1957 if (0 == hdr->total_len)
1958 wqe_size += sizeof(struct ocrdma_sge);
1959 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
1961 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1963 wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
1965 wqe_size += sizeof(struct ocrdma_sge);
1966 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1968 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1972 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1973 const struct ib_send_wr *wr)
1976 struct ocrdma_sge *sge;
1977 u32 wqe_size = sizeof(*hdr);
1979 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
1980 ocrdma_build_ud_hdr(qp, hdr, wr);
1981 sge = (struct ocrdma_sge *)(hdr + 2);
1982 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
1984 sge = (struct ocrdma_sge *)(hdr + 1);
1987 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1991 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1992 const struct ib_send_wr *wr)
1995 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
1996 struct ocrdma_sge *sge = ext_rw + 1;
1997 u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
1999 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2002 ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2003 ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2004 ext_rw->lrkey = rdma_wr(wr)->rkey;
2005 ext_rw->len = hdr->total_len;
2009 static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2010 const struct ib_send_wr *wr)
2012 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2013 struct ocrdma_sge *sge = ext_rw + 1;
2014 u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
2015 sizeof(struct ocrdma_hdr_wqe);
2017 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2018 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2019 hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
2020 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2022 ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2023 ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2024 ext_rw->lrkey = rdma_wr(wr)->rkey;
2025 ext_rw->len = hdr->total_len;
2028 static int get_encoded_page_size(int pg_sz)
2030 /* Max size is 256M (4096 << 16) */
2033 if (pg_sz == (4096 << i))
2038 static int ocrdma_build_reg(struct ocrdma_qp *qp,
2039 struct ocrdma_hdr_wqe *hdr,
2040 const struct ib_reg_wr *wr)
2043 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2044 struct ocrdma_mr *mr = get_ocrdma_mr(wr->mr);
2045 struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
2046 struct ocrdma_pbe *pbe;
2047 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2048 int num_pbes = 0, i;
2050 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2052 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
2053 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2055 if (wr->access & IB_ACCESS_LOCAL_WRITE)
2056 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
2057 if (wr->access & IB_ACCESS_REMOTE_WRITE)
2058 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
2059 if (wr->access & IB_ACCESS_REMOTE_READ)
2060 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2061 hdr->lkey = wr->key;
2062 hdr->total_len = mr->ibmr.length;
2064 fbo = mr->ibmr.iova - mr->pages[0];
2066 fast_reg->va_hi = upper_32_bits(mr->ibmr.iova);
2067 fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff);
2068 fast_reg->fbo_hi = upper_32_bits(fbo);
2069 fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2070 fast_reg->num_sges = mr->npages;
2071 fast_reg->size_sge = get_encoded_page_size(mr->ibmr.page_size);
2074 for (i = 0; i < mr->npages; i++) {
2075 u64 buf_addr = mr->pages[i];
2077 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2078 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
2082 /* if the pbl is full storing the pbes, move to the next pbl. */
2085 if (num_pbes == (mr->hwmr.pbl_size/sizeof(u64))) {
2087 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
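/* Ring the SQ doorbell so the adapter starts processing the newly posted WQE. */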
2094 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2096 u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
2098 iowrite32(val, qp->sq_db);
2101 int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2102 const struct ib_send_wr **bad_wr)
2105 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2106 struct ocrdma_hdr_wqe *hdr;
2107 unsigned long flags;
2109 spin_lock_irqsave(&qp->q_lock, flags);
2110 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2111 spin_unlock_irqrestore(&qp->q_lock, flags);
2117 if (qp->qp_type == IB_QPT_UD &&
2118 (wr->opcode != IB_WR_SEND &&
2119 wr->opcode != IB_WR_SEND_WITH_IMM)) {
2124 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2125 wr->num_sge > qp->sq.max_sges) {
2130 hdr = ocrdma_hwq_head(&qp->sq);
2132 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2133 hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2134 if (wr->send_flags & IB_SEND_FENCE)
2136 (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2137 if (wr->send_flags & IB_SEND_SOLICITED)
2139 (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2141 switch (wr->opcode) {
2142 case IB_WR_SEND_WITH_IMM:
2143 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2144 hdr->immdt = ntohl(wr->ex.imm_data);
2147 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2148 ocrdma_build_send(qp, hdr, wr);
2150 case IB_WR_SEND_WITH_INV:
2151 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2152 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2153 hdr->lkey = wr->ex.invalidate_rkey;
2154 status = ocrdma_build_send(qp, hdr, wr);
2156 case IB_WR_RDMA_WRITE_WITH_IMM:
2157 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2158 hdr->immdt = ntohl(wr->ex.imm_data);
2160 case IB_WR_RDMA_WRITE:
2161 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2162 status = ocrdma_build_write(qp, hdr, wr);
2164 case IB_WR_RDMA_READ:
2165 ocrdma_build_read(qp, hdr, wr);
2167 case IB_WR_LOCAL_INV:
2169 (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
2170 hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2171 sizeof(struct ocrdma_sge)) /
2172 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2173 hdr->lkey = wr->ex.invalidate_rkey;
2176 status = ocrdma_build_reg(qp, hdr, reg_wr(wr));
2186 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2187 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2189 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2190 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2191 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2192 OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2193 /* make sure wqe is written before adapter can access it */
2195 /* inform hw to start processing it */
2196 ocrdma_ring_sq_db(qp);
2198 /* update pointer, counter for next wr */
2199 ocrdma_hwq_inc_head(&qp->sq);
2202 spin_unlock_irqrestore(&qp->q_lock, flags);
2206 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2208 u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
2210 iowrite32(val, qp->rq_db);
2213 static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe,
2214 const struct ib_recv_wr *wr, u16 tag)
2217 struct ocrdma_sge *sge;
2219 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2221 wqe_size = sizeof(*sge) + sizeof(*rqe);
2223 rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2224 OCRDMA_WQE_SIZE_SHIFT);
2225 rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2226 rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2228 rqe->rsvd_tag = tag;
2229 sge = (struct ocrdma_sge *)(rqe + 1);
2230 ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2231 ocrdma_cpu_to_le32(rqe, wqe_size);
2234 int ocrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
2235 const struct ib_recv_wr **bad_wr)
2238 unsigned long flags;
2239 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2240 struct ocrdma_hdr_wqe *rqe;
2242 spin_lock_irqsave(&qp->q_lock, flags);
2243 if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
2244 spin_unlock_irqrestore(&qp->q_lock, flags);
2249 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
2250 wr->num_sge > qp->rq.max_sges) {
2255 rqe = ocrdma_hwq_head(&qp->rq);
2256 ocrdma_build_rqe(rqe, wr, 0);
2258 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2259 /* make sure rqe is written before adapter can access it */
2262 /* inform hw to start processing it */
2263 ocrdma_ring_rq_db(qp);
2265 /* update pointer, counter for next wr */
2266 ocrdma_hwq_inc_head(&qp->rq);
2269 spin_unlock_irqrestore(&qp->q_lock, flags);
2273 /* cqe for srq's rqe can potentially arrive out of order.
2274 * index gives the entry in the shadow table in which to store
2275 * the wr_id. tag/index is returned in the cqe to reference it back. */
2278 static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2283 for (row = 0; row < srq->bit_fields_len; row++) {
2284 if (srq->idx_bit_fields[row]) {
2285 indx = ffs(srq->idx_bit_fields[row]);
2286 indx = (row * 32) + (indx - 1);
2287 BUG_ON(indx >= srq->rq.max_cnt);
2288 ocrdma_srq_toggle_bit(srq, indx);
2293 BUG_ON(row == srq->bit_fields_len);
2294 return indx + 1; /* Use from index 1 */
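/*
 * Worked example of the index math above: if the lowest set bit of
 * idx_bit_fields[2] is bit 5, ffs() returns 6, so indx becomes
 * (2 * 32) + (6 - 1) = 69.  That bit is cleared via
 * ocrdma_srq_toggle_bit() and the caller gets tag 70, since tags are
 * handed out starting from index 1.
 */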
2297 static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2299 u32 val = srq->rq.dbid | (1 << 16);
2301 iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2304 int ocrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
2305 const struct ib_recv_wr **bad_wr)
2308 unsigned long flags;
2309 struct ocrdma_srq *srq;
2310 struct ocrdma_hdr_wqe *rqe;
2313 srq = get_ocrdma_srq(ibsrq);
2315 spin_lock_irqsave(&srq->q_lock, flags);
2317 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2318 wr->num_sge > srq->rq.max_sges) {
2323 tag = ocrdma_srq_get_idx(srq);
2324 rqe = ocrdma_hwq_head(&srq->rq);
2325 ocrdma_build_rqe(rqe, wr, tag);
2327 srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2328 /* make sure rqe is written before adapter can perform DMA */
2330 /* inform hw to start processing it */
2331 ocrdma_ring_srq_db(srq);
2332 /* update pointer, counter for next wr */
2333 ocrdma_hwq_inc_head(&srq->rq);
2336 spin_unlock_irqrestore(&srq->q_lock, flags);
2340 static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2342 enum ib_wc_status ibwc_status;
2345 case OCRDMA_CQE_GENERAL_ERR:
2346 ibwc_status = IB_WC_GENERAL_ERR;
2348 case OCRDMA_CQE_LOC_LEN_ERR:
2349 ibwc_status = IB_WC_LOC_LEN_ERR;
2351 case OCRDMA_CQE_LOC_QP_OP_ERR:
2352 ibwc_status = IB_WC_LOC_QP_OP_ERR;
2354 case OCRDMA_CQE_LOC_EEC_OP_ERR:
2355 ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2357 case OCRDMA_CQE_LOC_PROT_ERR:
2358 ibwc_status = IB_WC_LOC_PROT_ERR;
2360 case OCRDMA_CQE_WR_FLUSH_ERR:
2361 ibwc_status = IB_WC_WR_FLUSH_ERR;
2363 case OCRDMA_CQE_MW_BIND_ERR:
2364 ibwc_status = IB_WC_MW_BIND_ERR;
2366 case OCRDMA_CQE_BAD_RESP_ERR:
2367 ibwc_status = IB_WC_BAD_RESP_ERR;
2369 case OCRDMA_CQE_LOC_ACCESS_ERR:
2370 ibwc_status = IB_WC_LOC_ACCESS_ERR;
2372 case OCRDMA_CQE_REM_INV_REQ_ERR:
2373 ibwc_status = IB_WC_REM_INV_REQ_ERR;
2375 case OCRDMA_CQE_REM_ACCESS_ERR:
2376 ibwc_status = IB_WC_REM_ACCESS_ERR;
2378 case OCRDMA_CQE_REM_OP_ERR:
2379 ibwc_status = IB_WC_REM_OP_ERR;
2381 case OCRDMA_CQE_RETRY_EXC_ERR:
2382 ibwc_status = IB_WC_RETRY_EXC_ERR;
2384 case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2385 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2387 case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2388 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2390 case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2391 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2393 case OCRDMA_CQE_REM_ABORT_ERR:
2394 ibwc_status = IB_WC_REM_ABORT_ERR;
2396 case OCRDMA_CQE_INV_EECN_ERR:
2397 ibwc_status = IB_WC_INV_EECN_ERR;
2399 case OCRDMA_CQE_INV_EEC_STATE_ERR:
2400 ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2402 case OCRDMA_CQE_FATAL_ERR:
2403 ibwc_status = IB_WC_FATAL_ERR;
2405 case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2406 ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2409 ibwc_status = IB_WC_GENERAL_ERR;
2415 static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2418 struct ocrdma_hdr_wqe *hdr;
2419 struct ocrdma_sge *rw;
2422 hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2424 ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2425 /* Undo the hdr->cw swap */
2426 opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2429 ibwc->opcode = IB_WC_RDMA_WRITE;
2432 rw = (struct ocrdma_sge *)(hdr + 1);
2433 ibwc->opcode = IB_WC_RDMA_READ;
2434 ibwc->byte_len = rw->len;
2437 ibwc->opcode = IB_WC_SEND;
2440 ibwc->opcode = IB_WC_REG_MR;
2442 case OCRDMA_LKEY_INV:
2443 ibwc->opcode = IB_WC_LOCAL_INV;
2446 ibwc->status = IB_WC_GENERAL_ERR;
2447 pr_err("%s() invalid opcode received = 0x%x\n",
2448 __func__, le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK);
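/*
 * Overwrite the status bits of a hardware CQE with WR_FLUSH_ERR so that,
 * while the QP still has pending WQEs/RQEs, re-polling the same CQE
 * reports flushed completions.  UD and GSI receive completions carry
 * their status in a separate field, hence the second mask/shift pair.
 */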
2453 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2454 struct ocrdma_cqe *cqe)
2456 if (is_cqe_for_sq(cqe)) {
2457 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2458 cqe->flags_status_srcqpn) &
2459 ~OCRDMA_CQE_STATUS_MASK);
2460 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2461 cqe->flags_status_srcqpn) |
2462 (OCRDMA_CQE_WR_FLUSH_ERR <<
2463 OCRDMA_CQE_STATUS_SHIFT));
2465 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2466 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2467 cqe->flags_status_srcqpn) &
2468 ~OCRDMA_CQE_UD_STATUS_MASK);
2469 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2470 cqe->flags_status_srcqpn) |
2471 (OCRDMA_CQE_WR_FLUSH_ERR <<
2472 OCRDMA_CQE_UD_STATUS_SHIFT));
2474 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2475 cqe->flags_status_srcqpn) &
2476 ~OCRDMA_CQE_STATUS_MASK);
2477 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2478 cqe->flags_status_srcqpn) |
2479 (OCRDMA_CQE_WR_FLUSH_ERR <<
2480 OCRDMA_CQE_STATUS_SHIFT));
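/*
 * Common error-completion handling: fill in the work completion with the
 * translated status, flush the QP and move it to the error state, and
 * return true ("expand") while the SQ or RQ still holds entries so the
 * caller keeps reusing this CQE, now marked flushed, for them.
 */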
2485 static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2486 struct ocrdma_qp *qp, int status)
2488 bool expand = false;
2491 ibwc->qp = &qp->ibqp;
2492 ibwc->status = ocrdma_to_ibwc_err(status);
2494 ocrdma_flush_qp(qp);
2495 ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
2497 /* if wqes/rqes are pending for which cqes must be returned,
2498  * trigger expanding this cqe for them.
2500 if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2502 ocrdma_set_cqe_status_flushed(qp, cqe);
2507 static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2508 struct ocrdma_qp *qp, int status)
2510 ibwc->opcode = IB_WC_RECV;
2511 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2512 ocrdma_hwq_inc_tail(&qp->rq);
2514 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2517 static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2518 struct ocrdma_qp *qp, int status)
2520 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2521 ocrdma_hwq_inc_tail(&qp->sq);
2523 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2527 static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2528 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2529 bool *polled, bool *stop)
2532 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2533 int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2534 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2535 if (status < OCRDMA_MAX_CQE_ERR)
2536 atomic_inc(&dev->cqe_err_stats[status]);
2538 /* When the hw sq is empty but the rq is not, keep the cqe
2539  * so that the cq event is generated again for the pending rqes.
2541 if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2542 /* When the rq and sq share the same cq, it is safe to return
2543  * flush cqes for the pending rqes.
2545 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2547 status = OCRDMA_CQE_WR_FLUSH_ERR;
2548 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2550 /* Stop processing further cqes; this cqe is kept to trigger
2551  * the cq event on the RQ's buddy cq.
2552  * When the QP is destroyed, this cqe is removed
2553  * from the cq's hardware queue.
2559 } else if (is_hw_sq_empty(qp)) {
2566 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
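/*
 * Successful send completions: unsignaled WQEs are retired here without
 * producing a work completion (*polled stays false).  A single CQE may
 * also cover several WQEs; when the wqe index reported by the CQE is
 * ahead of the SQ tail, "expand" tells the caller to consume the same
 * CQE again for the next pending WQE.
 */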
2571 static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2572 struct ocrdma_cqe *cqe,
2573 struct ib_wc *ibwc, bool *polled)
2575 bool expand = false;
2576 int tail = qp->sq.tail;
2579 if (!qp->wqe_wr_id_tbl[tail].signaled) {
2580 *polled = false; /* WC cannot be consumed yet */
2582 ibwc->status = IB_WC_SUCCESS;
2584 ibwc->qp = &qp->ibqp;
2585 ocrdma_update_wc(qp, ibwc, tail);
2588 wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2589 OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
2590 if (tail != wqe_idx)
2591 expand = true; /* Coalesced CQE can't be consumed yet */
2593 ocrdma_hwq_inc_tail(&qp->sq);
2597 static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2598 struct ib_wc *ibwc, bool *polled, bool *stop)
2603 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2604 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2606 if (status == OCRDMA_CQE_SUCCESS)
2607 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2609 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2613 static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
2614 struct ocrdma_cqe *cqe)
2619 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2620 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2621 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2622 OCRDMA_CQE_SRCQP_MASK;
2623 ibwc->pkey_index = 0;
2624 ibwc->wc_flags = IB_WC_GRH;
2625 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2626 OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
2627 OCRDMA_CQE_UD_XFER_LEN_MASK;
2629 if (ocrdma_is_udp_encap_supported(dev)) {
2630 hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2631 OCRDMA_CQE_UD_L3TYPE_SHIFT) &
2632 OCRDMA_CQE_UD_L3TYPE_MASK;
2633 ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2634 ibwc->network_hdr_type = hdr_type;
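/*
 * For SRQ completions the buffer tag carried in the CQE is the
 * shadow-table index handed out by ocrdma_srq_get_idx(): recover the
 * wr_id from it, return the index by toggling its bit under the SRQ
 * lock, and advance the hardware RQ tail.
 */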
2640 static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2641 struct ocrdma_cqe *cqe,
2642 struct ocrdma_qp *qp)
2644 unsigned long flags;
2645 struct ocrdma_srq *srq;
2648 srq = get_ocrdma_srq(qp->ibqp.srq);
2649 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
2650 OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
2651 BUG_ON(wqe_idx < 1);
2653 ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2654 spin_lock_irqsave(&srq->q_lock, flags);
2655 ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
2656 spin_unlock_irqrestore(&srq->q_lock, flags);
2657 ocrdma_hwq_inc_tail(&srq->rq);
2660 static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2661 struct ib_wc *ibwc, bool *polled, bool *stop,
2665 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2667 if (status < OCRDMA_MAX_CQE_ERR)
2668 atomic_inc(&dev->cqe_err_stats[status]);
2670 /* When the hw rq is empty but the sq is not, keep the cqe
2671  * so that the cq event is generated again for the pending wqes.
2673 if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2674 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2676 status = OCRDMA_CQE_WR_FLUSH_ERR;
2677 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2683 } else if (is_hw_rq_empty(qp)) {
2690 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2695 static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2696 struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2698 struct ocrdma_dev *dev;
2700 dev = get_ocrdma_dev(qp->ibqp.device);
2701 ibwc->opcode = IB_WC_RECV;
2702 ibwc->qp = &qp->ibqp;
2703 ibwc->status = IB_WC_SUCCESS;
2705 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
2706 ocrdma_update_ud_rcqe(dev, ibwc, cqe);
2708 ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2710 if (is_cqe_imm(cqe)) {
2711 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2712 ibwc->wc_flags |= IB_WC_WITH_IMM;
2713 } else if (is_cqe_wr_imm(cqe)) {
2714 ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2715 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2716 ibwc->wc_flags |= IB_WC_WITH_IMM;
2717 } else if (is_cqe_invalidated(cqe)) {
2718 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2719 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2722 ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
2724 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2725 ocrdma_hwq_inc_tail(&qp->rq);
2729 static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2730 struct ib_wc *ibwc, bool *polled, bool *stop)
2733 bool expand = false;
2736 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2737 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2738 OCRDMA_CQE_UD_STATUS_MASK) >>
2739 OCRDMA_CQE_UD_STATUS_SHIFT;
2741 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2742 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2745 if (status == OCRDMA_CQE_SUCCESS) {
2747 ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2749 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2755 static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2758 if (cq->phase_change) {
2760 cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
2762 /* clear valid bit */
2763 cqe->flags_status_srcqpn = 0;
2767 static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
2772 bool expand = false;
2773 int polled_hw_cqes = 0;
2774 struct ocrdma_qp *qp = NULL;
2775 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
2776 struct ocrdma_cqe *cqe;
2777 u16 cur_getp; bool polled = false; bool stop = false;
2779 cur_getp = cq->getp;
2780 while (num_entries) {
2781 cqe = cq->va + cur_getp;
2782 /* check whether valid cqe or not */
2783 if (!is_cqe_valid(cq, cqe))
2785 qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
2786 /* ignore discarded cqe */
2789 qp = dev->qp_tbl[qpn];
2792 if (is_cqe_for_sq(cqe)) {
2793 expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
2796 expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
2803 /* clear qpn to avoid duplicate processing by discard_cqe() */
2806 polled_hw_cqes += 1;
2807 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
2808 ocrdma_change_cq_phase(cq, cqe, cur_getp);
2818 cq->getp = cur_getp;
2821 ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);
2826 /* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
2827 static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
2828 struct ocrdma_qp *qp, struct ib_wc *ibwc)
2832 while (num_entries) {
2833 if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
2835 if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
2836 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2837 ocrdma_hwq_inc_tail(&qp->sq);
2838 } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
2839 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2840 ocrdma_hwq_inc_tail(&qp->rq);
2845 ibwc->status = IB_WC_WR_FLUSH_ERR;
2853 int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2855 int cqes_to_poll = num_entries;
2856 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2857 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2858 int num_os_cqe = 0, err_cqes = 0;
2859 struct ocrdma_qp *qp;
2860 unsigned long flags;
2862 /* poll cqes from adapter CQ */
2863 spin_lock_irqsave(&cq->cq_lock, flags);
2864 num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
2865 spin_unlock_irqrestore(&cq->cq_lock, flags);
2866 cqes_to_poll -= num_os_cqe;
2869 wc = wc + num_os_cqe;
2870 /* The adapter returns a single error cqe when a qp moves to the
2871  * error state. So insert error cqes, with wc_status set to
2872  * FLUSHED, for the WQEs and RQEs still pending on the qp's SQ
2873  * and RQ that use this CQ.
2875 spin_lock_irqsave(&dev->flush_q_lock, flags);
2876 list_for_each_entry(qp, &cq->sq_head, sq_entry) {
2877 if (cqes_to_poll == 0)
2879 err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
2880 cqes_to_poll -= err_cqes;
2881 num_os_cqe += err_cqes;
2884 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
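/*
 * CQ arming as seen from the verbs consumer: completions are reaped with
 * ib_poll_cq() and the CQ is re-armed with ib_req_notify_cq().  A minimal
 * sketch ('cq', 'wc' and handle_completion() are hypothetical consumer
 * code; a careful consumer polls again after arming to close the race
 * with the notify request):
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	while (ib_poll_cq(cq, 1, &wc) > 0)
 *		handle_completion(&wc);
 */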
2889 int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
2891 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2892 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2894 unsigned long flags;
2895 bool arm_needed = false, sol_needed = false;
2899 spin_lock_irqsave(&cq->cq_lock, flags);
2900 if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
2902 if (cq_flags & IB_CQ_SOLICITED)
2905 ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
2906 spin_unlock_irqrestore(&cq->cq_lock, flags);
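/*
 * Fast-registration MR allocation.  The MR created here is used by the
 * consumer in the usual three-step flow; a rough sketch (identifiers are
 * hypothetical, error handling omitted):
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_num_sg);
 *	n  = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *
 *	struct ib_reg_wr reg_wr = {
 *		.wr.opcode = IB_WR_REG_MR,
 *		.mr        = mr,
 *		.key       = mr->rkey,
 *		.access    = IB_ACCESS_LOCAL_WRITE,
 *	};
 *	ib_post_send(qp, &reg_wr.wr, &bad_wr);
 */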
2911 struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
2912 u32 max_num_sg, struct ib_udata *udata)
2915 struct ocrdma_mr *mr;
2916 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
2917 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
2919 if (mr_type != IB_MR_TYPE_MEM_REG)
2920 return ERR_PTR(-EINVAL);
2922 if (max_num_sg > dev->attr.max_pages_per_frmr)
2923 return ERR_PTR(-EINVAL);
2925 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2927 return ERR_PTR(-ENOMEM);
2929 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
2935 status = ocrdma_get_pbl_info(dev, mr, max_num_sg);
2939 mr->hwmr.remote_rd = 0;
2940 mr->hwmr.remote_wr = 0;
2941 mr->hwmr.local_rd = 0;
2942 mr->hwmr.local_wr = 0;
2943 mr->hwmr.mw_bind = 0;
2944 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
2947 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
2950 mr->ibmr.rkey = mr->hwmr.lkey;
2951 mr->ibmr.lkey = mr->hwmr.lkey;
2952 dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
2956 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
2961 return ERR_PTR(-ENOMEM);
2964 static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
2966 struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);
2968 if (unlikely(mr->npages == mr->hwmr.num_pbes))
2971 mr->pages[mr->npages++] = addr;
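/*
 * ocrdma_set_page() is the per-page callback used by ib_sg_to_pages()
 * below: each page-sized block of the scatterlist is appended to
 * mr->pages, bounded by the number of PBEs reserved for this MR.  The
 * collected addresses are consumed later, when the registration work
 * request is turned into a REG_MR WQE (outside this excerpt).
 */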
2976 int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
2977 unsigned int *sg_offset)
2979 struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);
2983 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);