1 /*******************************************************************
2 * This file is part of the Emulex RoCE Device Driver for *
3 * RoCE (RDMA over Converged Ethernet) adapters. *
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
20 * Contact Information:
21 * linux-drivers@emulex.com
25 * Costa Mesa, CA 92626
26 *******************************************************************/
28 #include <linux/dma-mapping.h>
29 #include <rdma/ib_verbs.h>
30 #include <rdma/ib_user_verbs.h>
31 #include <rdma/iw_cm.h>
32 #include <rdma/ib_umem.h>
33 #include <rdma/ib_addr.h>
36 #include "ocrdma_hw.h"
37 #include "ocrdma_verbs.h"
38 #include "ocrdma_abi.h"
40 int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
49 int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
50 int index, union ib_gid *sgid)
52 struct ocrdma_dev *dev;
54 dev = get_ocrdma_dev(ibdev);
55 memset(sgid, 0, sizeof(*sgid));
56 if (index >= OCRDMA_MAX_SGID)
59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
64 int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
66 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
68 memset(attr, 0, sizeof *attr);
69 memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
70 min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
71 ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
72 attr->max_mr_size = dev->attr.max_mr_size;
73 attr->page_size_cap = 0xffff000;
74 attr->vendor_id = dev->nic_info.pdev->vendor;
75 attr->vendor_part_id = dev->nic_info.pdev->device;
76 attr->hw_ver = dev->asic_id;
77 attr->max_qp = dev->attr.max_qp;
78 attr->max_ah = OCRDMA_MAX_AH;
79 attr->max_qp_wr = dev->attr.max_wqe;
81 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
82 IB_DEVICE_RC_RNR_NAK_GEN |
83 IB_DEVICE_SHUTDOWN_PORT |
84 IB_DEVICE_SYS_IMAGE_GUID |
85 IB_DEVICE_LOCAL_DMA_LKEY |
86 IB_DEVICE_MEM_MGT_EXTENSIONS;
87 attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
89 attr->max_cq = dev->attr.max_cq;
90 attr->max_cqe = dev->attr.max_cqe;
91 attr->max_mr = dev->attr.max_mr;
92 attr->max_mw = dev->attr.max_mw;
93 attr->max_pd = dev->attr.max_pd;
96 attr->max_map_per_fmr = 0;
97 attr->max_qp_rd_atom =
98 min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
99 attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
100 attr->max_srq = dev->attr.max_srq;
101 attr->max_srq_sge = dev->attr.max_srq_sge;
102 attr->max_srq_wr = dev->attr.max_rqe;
103 attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
104 attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
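/*
 * Translate the PHY link speed reported by firmware into an IB
 * speed/width pair; anything unrecognized falls back to SDR x1.
 */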
109 static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
110 u8 *ib_speed, u8 *ib_width)
115 status = ocrdma_mbx_get_link_speed(dev, &speed);
117 speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
120 case OCRDMA_PHYS_LINK_SPEED_1GBPS:
121 *ib_speed = IB_SPEED_SDR;
122 *ib_width = IB_WIDTH_1X;
125 case OCRDMA_PHYS_LINK_SPEED_10GBPS:
126 *ib_speed = IB_SPEED_QDR;
127 *ib_width = IB_WIDTH_1X;
130 case OCRDMA_PHYS_LINK_SPEED_20GBPS:
131 *ib_speed = IB_SPEED_DDR;
132 *ib_width = IB_WIDTH_4X;
135 case OCRDMA_PHYS_LINK_SPEED_40GBPS:
136 *ib_speed = IB_SPEED_QDR;
137 *ib_width = IB_WIDTH_4X;
142 *ib_speed = IB_SPEED_SDR;
143 *ib_width = IB_WIDTH_1X;
147 int ocrdma_query_port(struct ib_device *ibdev,
148 u8 port, struct ib_port_attr *props)
150 enum ib_port_state port_state;
151 struct ocrdma_dev *dev;
152 struct net_device *netdev;
154 dev = get_ocrdma_dev(ibdev);
156 pr_err("%s(%d) invalid_port=0x%x\n", __func__,
160 netdev = dev->nic_info.netdev;
161 if (netif_running(netdev) && netif_oper_up(netdev)) {
162 port_state = IB_PORT_ACTIVE;
163 props->phys_state = 5;
165 port_state = IB_PORT_DOWN;
166 props->phys_state = 3;
168 props->max_mtu = IB_MTU_4096;
169 props->active_mtu = iboe_get_mtu(netdev->mtu);
174 props->state = port_state;
175 props->port_cap_flags =
178 IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS;
179 props->gid_tbl_len = OCRDMA_MAX_SGID;
180 props->pkey_tbl_len = 1;
181 props->bad_pkey_cntr = 0;
182 props->qkey_viol_cntr = 0;
183 get_link_speed_and_width(dev, &props->active_speed,
184 &props->active_width);
185 props->max_msg_sz = 0x80000000;
186 props->max_vl_num = 4;
190 int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
191 struct ib_port_modify *props)
193 struct ocrdma_dev *dev;
195 dev = get_ocrdma_dev(ibdev);
197 pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
203 static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
206 struct ocrdma_mm *mm;
208 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
211 mm->key.phy_addr = phy_addr;
213 INIT_LIST_HEAD(&mm->entry);
215 mutex_lock(&uctx->mm_list_lock);
216 list_add_tail(&mm->entry, &uctx->mm_head);
217 mutex_unlock(&uctx->mm_list_lock);
221 static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
224 struct ocrdma_mm *mm, *tmp;
226 mutex_lock(&uctx->mm_list_lock);
227 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
228 if (len != mm->key.len && phy_addr != mm->key.phy_addr)
231 list_del(&mm->entry);
235 mutex_unlock(&uctx->mm_list_lock);
238 static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
242 struct ocrdma_mm *mm;
244 mutex_lock(&uctx->mm_list_lock);
245 list_for_each_entry(mm, &uctx->mm_head, entry) {
246 if (len != mm->key.len && phy_addr != mm->key.phy_addr)
252 mutex_unlock(&uctx->mm_list_lock);
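/*
 * Pre-allocated PD pool helpers: take/return a slot in either the DPP
 * or the normal PD bitmap. Callers serialize these through
 * dev->dev_lock (see ocrdma_get_pd_num()/ocrdma_put_pd_num()).
 */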
257 static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
259 u16 pd_bitmap_idx = 0;
260 const unsigned long *pd_bitmap;
263 pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
264 pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
265 dev->pd_mgr->max_dpp_pd);
266 __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
267 dev->pd_mgr->pd_dpp_count++;
268 if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
269 dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
271 pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
272 pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
273 dev->pd_mgr->max_normal_pd);
274 __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
275 dev->pd_mgr->pd_norm_count++;
276 if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
277 dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
279 return pd_bitmap_idx;
282 static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
288 pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
289 dev->pd_mgr->pd_norm_count;
294 pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
295 if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
298 __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
299 dev->pd_mgr->pd_dpp_count--;
302 pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
303 if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
306 __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
307 dev->pd_mgr->pd_norm_count--;
314 static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
319 mutex_lock(&dev->dev_lock);
320 status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
321 mutex_unlock(&dev->dev_lock);
325 static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
330 mutex_lock(&dev->dev_lock);
331 if (pd->dpp_enabled) {
332 /* try allocating DPP PD, if not available then normal PD */
333 if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
334 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
335 pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
336 pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
337 } else if (dev->pd_mgr->pd_norm_count <
338 dev->pd_mgr->max_normal_pd) {
339 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
340 pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
341 pd->dpp_enabled = false;
346 if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
347 pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
348 pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
353 mutex_unlock(&dev->dev_lock);
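/*
 * Allocate a PD, preferring a DPP-capable one for user contexts when
 * the device supports DPP PDs. If firmware has pre-allocated a PD range
 * (pd_prealloc_valid), take a PD number from the local pool; otherwise
 * issue the alloc-PD mailbox command.
 */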
357 static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
358 struct ocrdma_ucontext *uctx,
359 struct ib_udata *udata)
361 struct ocrdma_pd *pd = NULL;
364 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
366 return ERR_PTR(-ENOMEM);
368 if (udata && uctx && dev->attr.max_dpp_pds) {
370 ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
372 pd->dpp_enabled ? (dev->nic_info.db_page_size /
373 dev->attr.wqe_size) : 0;
376 if (dev->pd_mgr->pd_prealloc_valid) {
377 status = ocrdma_get_pd_num(dev, pd);
378 return (status == 0) ? pd : ERR_PTR(status);
382 status = ocrdma_mbx_alloc_pd(dev, pd);
384 if (pd->dpp_enabled) {
385 pd->dpp_enabled = false;
390 return ERR_PTR(status);
397 static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
398 struct ocrdma_pd *pd)
400 return (uctx->cntxt_pd == pd ? true : false);
403 static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
404 struct ocrdma_pd *pd)
408 if (dev->pd_mgr->pd_prealloc_valid)
409 status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
411 status = ocrdma_mbx_dealloc_pd(dev, pd);
417 static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
418 struct ocrdma_ucontext *uctx,
419 struct ib_udata *udata)
423 uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
424 if (IS_ERR(uctx->cntxt_pd)) {
425 status = PTR_ERR(uctx->cntxt_pd);
426 uctx->cntxt_pd = NULL;
430 uctx->cntxt_pd->uctx = uctx;
431 uctx->cntxt_pd->ibpd.device = &dev->ibdev;
436 static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
438 struct ocrdma_pd *pd = uctx->cntxt_pd;
439 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
441 if (uctx->pd_in_use) {
442 pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
443 __func__, dev->id, pd->id);
445 uctx->cntxt_pd = NULL;
446 (void)_ocrdma_dealloc_pd(dev, pd);
450 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
452 struct ocrdma_pd *pd = NULL;
454 mutex_lock(&uctx->mm_list_lock);
455 if (!uctx->pd_in_use) {
456 uctx->pd_in_use = true;
459 mutex_unlock(&uctx->mm_list_lock);
464 static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
466 mutex_lock(&uctx->mm_list_lock);
467 uctx->pd_in_use = false;
468 mutex_unlock(&uctx->mm_list_lock);
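/*
 * Allocate a user context: a DMA-coherent AH table exposed to userspace
 * via mmap, plus a per-context PD, and return the device limits in the
 * ucontext response.
 */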
471 struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
472 struct ib_udata *udata)
475 struct ocrdma_ucontext *ctx;
476 struct ocrdma_alloc_ucontext_resp resp;
477 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
478 struct pci_dev *pdev = dev->nic_info.pdev;
479 u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
482 return ERR_PTR(-EFAULT);
483 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
485 return ERR_PTR(-ENOMEM);
486 INIT_LIST_HEAD(&ctx->mm_head);
487 mutex_init(&ctx->mm_list_lock);
489 ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
490 &ctx->ah_tbl.pa, GFP_KERNEL);
491 if (!ctx->ah_tbl.va) {
493 return ERR_PTR(-ENOMEM);
495 memset(ctx->ah_tbl.va, 0, map_len);
496 ctx->ah_tbl.len = map_len;
498 memset(&resp, 0, sizeof(resp));
499 resp.ah_tbl_len = ctx->ah_tbl.len;
500 resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);
502 status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
506 status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
510 resp.dev_id = dev->id;
511 resp.max_inline_data = dev->attr.max_inline_data;
512 resp.wqe_size = dev->attr.wqe_size;
513 resp.rqe_size = dev->attr.rqe_size;
514 resp.dpp_wqe_size = dev->attr.wqe_size;
516 memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
517 status = ib_copy_to_udata(udata, &resp, sizeof(resp));
520 return &ctx->ibucontext;
524 ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
526 dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
529 return ERR_PTR(status);
532 int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
535 struct ocrdma_mm *mm, *tmp;
536 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
537 struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
538 struct pci_dev *pdev = dev->nic_info.pdev;
540 status = ocrdma_dealloc_ucontext_pd(uctx);
542 ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
543 dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
546 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
547 list_del(&mm->entry);
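/*
 * mmap dispatch for userspace: doorbell pages are mapped non-cached,
 * DPP regions write-combined, and queue memory with the default
 * protection. Only offsets previously registered via ocrdma_add_mmap()
 * are honored.
 */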
554 int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
556 struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
557 struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
558 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
559 u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
560 unsigned long len = (vma->vm_end - vma->vm_start);
564 if (vma->vm_start & (PAGE_SIZE - 1))
566 found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
570 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
571 dev->nic_info.db_total_size)) &&
572 (len <= dev->nic_info.db_page_size)) {
573 if (vma->vm_flags & VM_READ)
576 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
577 status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
578 len, vma->vm_page_prot);
579 } else if (dev->nic_info.dpp_unmapped_len &&
580 (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
581 (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
582 dev->nic_info.dpp_unmapped_len)) &&
583 (len <= dev->nic_info.dpp_unmapped_len)) {
584 if (vma->vm_flags & VM_READ)
587 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
588 status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
589 len, vma->vm_page_prot);
591 status = remap_pfn_range(vma, vma->vm_start,
592 vma->vm_pgoff, len, vma->vm_page_prot);
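/*
 * Return the PD's doorbell page (and DPP page when enabled) to
 * userspace and register both regions in the context's mmap list so
 * the subsequent mmap() calls are accepted.
 */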
597 static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
598 struct ib_ucontext *ib_ctx,
599 struct ib_udata *udata)
603 u64 dpp_page_addr = 0;
605 struct ocrdma_alloc_pd_uresp rsp;
606 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
608 memset(&rsp, 0, sizeof(rsp));
610 rsp.dpp_enabled = pd->dpp_enabled;
611 db_page_addr = ocrdma_get_db_addr(dev, pd->id);
612 db_page_size = dev->nic_info.db_page_size;
614 status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
618 if (pd->dpp_enabled) {
619 dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
620 (pd->id * PAGE_SIZE);
621 status = ocrdma_add_mmap(uctx, dpp_page_addr,
625 rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
626 rsp.dpp_page_addr_lo = dpp_page_addr;
629 status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
638 ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
640 ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
644 struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
645 struct ib_ucontext *context,
646 struct ib_udata *udata)
648 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
649 struct ocrdma_pd *pd;
650 struct ocrdma_ucontext *uctx = NULL;
652 u8 is_uctx_pd = false;
654 if (udata && context) {
655 uctx = get_ocrdma_ucontext(context);
656 pd = ocrdma_get_ucontext_pd(uctx);
663 pd = _ocrdma_alloc_pd(dev, uctx, udata);
665 status = PTR_ERR(pd);
670 if (udata && context) {
671 status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
679 ocrdma_release_ucontext_pd(uctx);
681 status = _ocrdma_dealloc_pd(dev, pd);
685 return ERR_PTR(status);
688 int ocrdma_dealloc_pd(struct ib_pd *ibpd)
690 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
691 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
692 struct ocrdma_ucontext *uctx = NULL;
698 u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
699 (pd->id * PAGE_SIZE);
701 ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
702 usr_db = ocrdma_get_db_addr(dev, pd->id);
703 ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
705 if (is_ucontext_pd(uctx, pd)) {
706 ocrdma_release_ucontext_pd(uctx);
710 status = _ocrdma_dealloc_pd(dev, pd);
714 static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
715 u32 pdid, int acc, u32 num_pbls, u32 addr_check)
720 mr->hwmr.local_rd = 1;
721 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
722 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
723 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
724 mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
725 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
726 mr->hwmr.num_pbls = num_pbls;
728 status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
732 mr->ibmr.lkey = mr->hwmr.lkey;
733 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
734 mr->ibmr.rkey = mr->hwmr.lkey;
738 struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
741 struct ocrdma_mr *mr;
742 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
743 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
745 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
746 pr_err("%s err, invalid access rights\n", __func__);
747 return ERR_PTR(-EINVAL);
750 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
752 return ERR_PTR(-ENOMEM);
754 status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
755 OCRDMA_ADDR_CHECK_DISABLE);
758 return ERR_PTR(status);
764 static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
765 struct ocrdma_hw_mr *mr)
767 struct pci_dev *pdev = dev->nic_info.pdev;
771 for (i = 0; i < mr->num_pbls; i++) {
772 if (!mr->pbl_table[i].va)
774 dma_free_coherent(&pdev->dev, mr->pbl_size,
776 mr->pbl_table[i].pa);
778 kfree(mr->pbl_table);
779 mr->pbl_table = NULL;
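/*
 * Pick a PBL size for the requested number of PBEs: grow the candidate
 * PBL size in powers of two from OCRDMA_MIN_HPAGE_SIZE until the number
 * of PBLs needed fits within the device limit.
 */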
783 static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
792 pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
793 if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
797 num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
798 num_pbls = num_pbls / (pbl_size / sizeof(u64));
800 } while (num_pbls >= dev->attr.max_num_mr_pbl);
802 mr->hwmr.num_pbes = num_pbes;
803 mr->hwmr.num_pbls = num_pbls;
804 mr->hwmr.pbl_size = pbl_size;
808 static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
812 u32 dma_len = mr->pbl_size;
813 struct pci_dev *pdev = dev->nic_info.pdev;
817 mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
818 mr->num_pbls, GFP_KERNEL);
823 for (i = 0; i < mr->num_pbls; i++) {
824 va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
826 ocrdma_free_mr_pbl_tbl(dev, mr);
830 memset(va, 0, dma_len);
831 mr->pbl_table[i].va = va;
832 mr->pbl_table[i].pa = pa;
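/*
 * Walk the umem scatterlist and write one little-endian PBE (low/high
 * DMA address) per page, moving on to the next PBL page whenever the
 * current one fills up.
 */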
837 static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
840 struct ocrdma_pbe *pbe;
841 struct scatterlist *sg;
842 struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
843 struct ib_umem *umem = mr->umem;
844 int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;
846 if (!mr->hwmr.num_pbes)
849 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
852 shift = ilog2(umem->page_size);
854 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
855 pages = sg_dma_len(sg) >> shift;
856 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
857 /* store the page address in pbe */
859 cpu_to_le32(sg_dma_address
861 (umem->page_size * pg_cnt));
863 cpu_to_le32(upper_32_bits
866 umem->page_size * pg_cnt)));
871 /* if done building pbes, issue the mbx cmd. */
872 if (total_num_pbes == num_pbes)
875 /* if the given pbl is full of pbes,
879 (mr->hwmr.pbl_size / sizeof(u64))) {
881 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
889 struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
890 u64 usr_addr, int acc, struct ib_udata *udata)
892 int status = -ENOMEM;
893 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
894 struct ocrdma_mr *mr;
895 struct ocrdma_pd *pd;
898 pd = get_ocrdma_pd(ibpd);
900 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
901 return ERR_PTR(-EINVAL);
903 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
905 return ERR_PTR(status);
906 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
907 if (IS_ERR(mr->umem)) {
911 num_pbes = ib_umem_page_count(mr->umem);
912 status = ocrdma_get_pbl_info(dev, mr, num_pbes);
916 mr->hwmr.pbe_size = mr->umem->page_size;
917 mr->hwmr.fbo = ib_umem_offset(mr->umem);
918 mr->hwmr.va = usr_addr;
920 mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
921 mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
922 mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
923 mr->hwmr.local_rd = 1;
924 mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
925 status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
928 build_user_pbes(dev, mr, num_pbes);
929 status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
932 mr->ibmr.lkey = mr->hwmr.lkey;
933 if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
934 mr->ibmr.rkey = mr->hwmr.lkey;
939 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
942 return ERR_PTR(status);
945 int ocrdma_dereg_mr(struct ib_mr *ib_mr)
947 struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
948 struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
950 (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
952 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
954 /* it could be user registered memory. */
956 ib_umem_release(mr->umem);
959 /* Don't stop cleanup, in case FW is unresponsive */
960 if (dev->mqe_ctx.fw_error_state) {
961 pr_err("%s(%d) fw not responding.\n",
967 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
968 struct ib_udata *udata,
969 struct ib_ucontext *ib_ctx)
972 struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
973 struct ocrdma_create_cq_uresp uresp;
975 memset(&uresp, 0, sizeof(uresp));
976 uresp.cq_id = cq->id;
977 uresp.page_size = PAGE_ALIGN(cq->len);
979 uresp.max_hw_cqe = cq->max_hw_cqe;
980 uresp.page_addr[0] = virt_to_phys(cq->va);
981 uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
982 uresp.db_page_size = dev->nic_info.db_page_size;
983 uresp.phase_change = cq->phase_change ? 1 : 0;
984 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
986 pr_err("%s(%d) copy error cqid=0x%x.\n",
987 __func__, dev->id, cq->id);
990 status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
993 status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
995 ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
1003 struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
1004 struct ib_ucontext *ib_ctx,
1005 struct ib_udata *udata)
1007 struct ocrdma_cq *cq;
1008 struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
1009 struct ocrdma_ucontext *uctx = NULL;
1012 struct ocrdma_create_cq_ureq ureq;
1015 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1016 return ERR_PTR(-EFAULT);
1019 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
1021 return ERR_PTR(-ENOMEM);
1023 spin_lock_init(&cq->cq_lock);
1024 spin_lock_init(&cq->comp_handler_lock);
1025 INIT_LIST_HEAD(&cq->sq_head);
1026 INIT_LIST_HEAD(&cq->rq_head);
1027 cq->first_arm = true;
1030 uctx = get_ocrdma_ucontext(ib_ctx);
1031 pd_id = uctx->cntxt_pd->id;
1034 status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
1037 return ERR_PTR(status);
1040 status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
1044 cq->phase = OCRDMA_CQE_VALID;
1045 dev->cq_tbl[cq->id] = cq;
1049 ocrdma_mbx_destroy_cq(dev, cq);
1051 return ERR_PTR(status);
1054 int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
1055 struct ib_udata *udata)
1058 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
1060 if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
1064 ibcq->cqe = new_cnt;
1068 static void ocrdma_flush_cq(struct ocrdma_cq *cq)
1071 int valid_count = 0;
1072 unsigned long flags;
1074 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
1075 struct ocrdma_cqe *cqe = NULL;
1078 cqe_cnt = cq->cqe_cnt;
1080 /* The last IRQ might have scheduled a polling thread;
1081 * sync up with it before hard flushing.
1083 spin_lock_irqsave(&cq->cq_lock, flags);
1085 if (is_cqe_valid(cq, cqe))
1090 ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
1091 spin_unlock_irqrestore(&cq->cq_lock, flags);
1094 int ocrdma_destroy_cq(struct ib_cq *ibcq)
1096 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
1097 struct ocrdma_eq *eq = NULL;
1098 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
1102 dev->cq_tbl[cq->id] = NULL;
1103 indx = ocrdma_get_eq_table_index(dev, cq->eqn);
1104 if (indx == -EINVAL)
1107 eq = &dev->eq_tbl[indx];
1108 irq = ocrdma_get_irq(dev, eq);
1109 synchronize_irq(irq);
1110 ocrdma_flush_cq(cq);
1112 (void)ocrdma_mbx_destroy_cq(dev, cq);
1114 pdid = cq->ucontext->cntxt_pd->id;
1115 ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
1116 PAGE_ALIGN(cq->len));
1117 ocrdma_del_mmap(cq->ucontext,
1118 ocrdma_get_db_addr(dev, pdid),
1119 dev->nic_info.db_page_size);
1126 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
1128 int status = -EINVAL;
1130 if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
1131 dev->qp_tbl[qp->id] = qp;
1137 static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
1139 dev->qp_tbl[qp->id] = NULL;
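/*
 * Validate the requested QP type and capabilities against the limits
 * reported by firmware, and enforce the GSI QP restrictions, before
 * attempting to create the QP.
 */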
1142 static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
1143 struct ib_qp_init_attr *attrs)
1145 if ((attrs->qp_type != IB_QPT_GSI) &&
1146 (attrs->qp_type != IB_QPT_RC) &&
1147 (attrs->qp_type != IB_QPT_UC) &&
1148 (attrs->qp_type != IB_QPT_UD)) {
1149 pr_err("%s(%d) unsupported qp type=0x%x requested\n",
1150 __func__, dev->id, attrs->qp_type);
1153 /* Skip the check for QP1 to support CM size of 128 */
1154 if ((attrs->qp_type != IB_QPT_GSI) &&
1155 (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
1156 pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
1157 __func__, dev->id, attrs->cap.max_send_wr);
1158 pr_err("%s(%d) supported send_wr=0x%x\n",
1159 __func__, dev->id, dev->attr.max_wqe);
1162 if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
1163 pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
1164 __func__, dev->id, attrs->cap.max_recv_wr);
1165 pr_err("%s(%d) supported recv_wr=0x%x\n",
1166 __func__, dev->id, dev->attr.max_rqe);
1169 if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
1170 pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
1171 __func__, dev->id, attrs->cap.max_inline_data);
1172 pr_err("%s(%d) supported inline data size=0x%x\n",
1173 __func__, dev->id, dev->attr.max_inline_data);
1176 if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
1177 pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
1178 __func__, dev->id, attrs->cap.max_send_sge);
1179 pr_err("%s(%d) supported send_sge=0x%x\n",
1180 __func__, dev->id, dev->attr.max_send_sge);
1183 if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
1184 pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
1185 __func__, dev->id, attrs->cap.max_recv_sge);
1186 pr_err("%s(%d) supported recv_sge=0x%x\n",
1187 __func__, dev->id, dev->attr.max_recv_sge);
1190 /* unprivileged user space cannot create special QP */
1191 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1193 ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
1194 __func__, dev->id, attrs->qp_type);
1197 /* allow creating only one GSI type of QP */
1198 if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
1199 pr_err("%s(%d) GSI special QPs already created.\n",
1203 /* verify consumer QPs are not trying to use GSI QP's CQ */
1204 if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
1205 if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
1206 (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
1207 pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
1215 static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
1216 struct ib_udata *udata, int dpp_offset,
1217 int dpp_credit_lmt, int srq)
1221 struct ocrdma_create_qp_uresp uresp;
1222 struct ocrdma_pd *pd = qp->pd;
1223 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
1225 memset(&uresp, 0, sizeof(uresp));
1226 usr_db = dev->nic_info.unmapped_db +
1227 (pd->id * dev->nic_info.db_page_size);
1228 uresp.qp_id = qp->id;
1229 uresp.sq_dbid = qp->sq.dbid;
1230 uresp.num_sq_pages = 1;
1231 uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
1232 uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
1233 uresp.num_wqe_allocated = qp->sq.max_cnt;
1235 uresp.rq_dbid = qp->rq.dbid;
1236 uresp.num_rq_pages = 1;
1237 uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
1238 uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
1239 uresp.num_rqe_allocated = qp->rq.max_cnt;
1241 uresp.db_page_addr = usr_db;
1242 uresp.db_page_size = dev->nic_info.db_page_size;
1243 uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
1244 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1245 uresp.db_shift = OCRDMA_DB_RQ_SHIFT;
1247 if (qp->dpp_enabled) {
1248 uresp.dpp_credit = dpp_credit_lmt;
1249 uresp.dpp_offset = dpp_offset;
1251 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1253 pr_err("%s(%d) user copy error.\n", __func__, dev->id);
1256 status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
1257 uresp.sq_page_size);
1262 status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
1263 uresp.rq_page_size);
1269 ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
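/*
 * SQ/RQ doorbell offsets within the per-PD doorbell page differ between
 * the SKH-R generation and earlier ASICs; pick the right pair here.
 */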
1274 static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
1275 struct ocrdma_pd *pd)
1277 if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1278 qp->sq_db = dev->nic_info.db +
1279 (pd->id * dev->nic_info.db_page_size) +
1280 OCRDMA_DB_GEN2_SQ_OFFSET;
1281 qp->rq_db = dev->nic_info.db +
1282 (pd->id * dev->nic_info.db_page_size) +
1283 OCRDMA_DB_GEN2_RQ_OFFSET;
1285 qp->sq_db = dev->nic_info.db +
1286 (pd->id * dev->nic_info.db_page_size) +
1287 OCRDMA_DB_SQ_OFFSET;
1288 qp->rq_db = dev->nic_info.db +
1289 (pd->id * dev->nic_info.db_page_size) +
1290 OCRDMA_DB_RQ_OFFSET;
1294 static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
1297 kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
1299 if (qp->wqe_wr_id_tbl == NULL)
1302 kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
1303 if (qp->rqe_wr_id_tbl == NULL)
1309 static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
1310 struct ocrdma_pd *pd,
1311 struct ib_qp_init_attr *attrs)
1314 spin_lock_init(&qp->q_lock);
1315 INIT_LIST_HEAD(&qp->sq_entry);
1316 INIT_LIST_HEAD(&qp->rq_entry);
1318 qp->qp_type = attrs->qp_type;
1319 qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
1320 qp->max_inline_data = attrs->cap.max_inline_data;
1321 qp->sq.max_sges = attrs->cap.max_send_sge;
1322 qp->rq.max_sges = attrs->cap.max_recv_sge;
1323 qp->state = OCRDMA_QPS_RST;
1324 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1327 static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
1328 struct ib_qp_init_attr *attrs)
1330 if (attrs->qp_type == IB_QPT_GSI) {
1331 dev->gsi_qp_created = 1;
1332 dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
1333 dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
1337 struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1338 struct ib_qp_init_attr *attrs,
1339 struct ib_udata *udata)
1342 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1343 struct ocrdma_qp *qp;
1344 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1345 struct ocrdma_create_qp_ureq ureq;
1346 u16 dpp_credit_lmt, dpp_offset;
1348 status = ocrdma_check_qp_params(ibpd, dev, attrs);
1352 memset(&ureq, 0, sizeof(ureq));
1354 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1355 return ERR_PTR(-EFAULT);
1357 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1362 ocrdma_set_qp_init_params(qp, pd, attrs);
1364 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
1365 OCRDMA_QP_FAST_REG);
1367 mutex_lock(&dev->dev_lock);
1368 status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
1370 &dpp_offset, &dpp_credit_lmt);
1374 /* user space QPs' wr_id tables are managed in the library */
1375 if (udata == NULL) {
1376 status = ocrdma_alloc_wr_id_tbl(qp);
1381 status = ocrdma_add_qpn_map(dev, qp);
1384 ocrdma_set_qp_db(dev, qp, pd);
1386 status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
1388 (attrs->srq != NULL));
1392 ocrdma_store_gsi_qp_cq(dev, attrs);
1393 qp->ibqp.qp_num = qp->id;
1394 mutex_unlock(&dev->dev_lock);
1398 ocrdma_del_qpn_map(dev, qp);
1400 ocrdma_mbx_destroy_qp(dev, qp);
1402 mutex_unlock(&dev->dev_lock);
1403 kfree(qp->wqe_wr_id_tbl);
1404 kfree(qp->rqe_wr_id_tbl);
1406 pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
1408 return ERR_PTR(status);
1411 int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1415 struct ocrdma_qp *qp;
1416 struct ocrdma_dev *dev;
1417 enum ib_qp_state old_qps;
1419 qp = get_ocrdma_qp(ibqp);
1420 dev = get_ocrdma_dev(ibqp->device);
1421 if (attr_mask & IB_QP_STATE)
1422 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
1423 /* if the new and previous states are the same, hw doesn't need to
1428 status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
1433 int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1434 int attr_mask, struct ib_udata *udata)
1436 unsigned long flags;
1437 int status = -EINVAL;
1438 struct ocrdma_qp *qp;
1439 struct ocrdma_dev *dev;
1440 enum ib_qp_state old_qps, new_qps;
1442 qp = get_ocrdma_qp(ibqp);
1443 dev = get_ocrdma_dev(ibqp->device);
1445 /* synchronize with multiple contexts trying to change/retrieve QP state */
1446 mutex_lock(&dev->dev_lock);
1447 /* synchronize with WQE/RQE posting and CQE processing contexts */
1448 spin_lock_irqsave(&qp->q_lock, flags);
1449 old_qps = get_ibqp_state(qp->state);
1450 if (attr_mask & IB_QP_STATE)
1451 new_qps = attr->qp_state;
1454 spin_unlock_irqrestore(&qp->q_lock, flags);
1456 if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
1457 IB_LINK_LAYER_ETHERNET)) {
1458 pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
1459 "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1460 __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
1465 status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
1469 mutex_unlock(&dev->dev_lock);
1473 static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1491 static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1493 int ib_qp_acc_flags = 0;
1495 if (qp_cap_flags & OCRDMA_QP_INB_WR)
1496 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1497 if (qp_cap_flags & OCRDMA_QP_INB_RD)
1498 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1499 return ib_qp_acc_flags;
1502 int ocrdma_query_qp(struct ib_qp *ibqp,
1503 struct ib_qp_attr *qp_attr,
1504 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1508 struct ocrdma_qp_params params;
1509 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1510 struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
1512 memset(&params, 0, sizeof(params));
1513 mutex_lock(&dev->dev_lock);
1514 status = ocrdma_mbx_query_qp(dev, qp, &params);
1515 mutex_unlock(&dev->dev_lock);
1518 if (qp->qp_type == IB_QPT_UD)
1519 qp_attr->qkey = params.qkey;
1521 ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
1522 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1523 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
1524 qp_attr->path_mig_state = IB_MIG_MIGRATED;
1525 qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1526 qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1527 qp_attr->dest_qp_num =
1528 params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1530 qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1531 qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1532 qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1533 qp_attr->cap.max_send_sge = qp->sq.max_sges;
1534 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1535 qp_attr->cap.max_inline_data = qp->max_inline_data;
1536 qp_init_attr->cap = qp_attr->cap;
1537 memcpy(&qp_attr->ah_attr.grh.dgid, ¶ms.dgid[0],
1538 sizeof(params.dgid));
1539 qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
1540 OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
1541 qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
1542 qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
1543 OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1544 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
1545 qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
1546 OCRDMA_QP_PARAMS_TCLASS_MASK) >>
1547 OCRDMA_QP_PARAMS_TCLASS_SHIFT;
1549 qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1550 qp_attr->ah_attr.port_num = 1;
1551 qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
1552 OCRDMA_QP_PARAMS_SL_MASK) >>
1553 OCRDMA_QP_PARAMS_SL_SHIFT;
1554 qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1555 OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1556 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1557 qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1558 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1559 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1560 qp_attr->retry_cnt =
1561 (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1562 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1563 qp_attr->min_rnr_timer = 0;
1564 qp_attr->pkey_index = 0;
1565 qp_attr->port_num = 1;
1566 qp_attr->ah_attr.src_path_bits = 0;
1567 qp_attr->ah_attr.static_rate = 0;
1568 qp_attr->alt_pkey_index = 0;
1569 qp_attr->alt_port_num = 0;
1570 qp_attr->alt_timeout = 0;
1571 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1572 qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1573 OCRDMA_QP_PARAMS_STATE_SHIFT;
1574 qp_attr->qp_state = get_ibqp_state(qp_state);
1575 qp_attr->cur_qp_state = qp_attr->qp_state;
1576 qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1577 qp_attr->max_dest_rd_atomic =
1578 params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1579 qp_attr->max_rd_atomic =
1580 params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1581 qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1582 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1583 /* Sync driver QP state with FW */
1584 ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
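/*
 * The SRQ shadow wr_id table is index-allocated through a bitmap: a set
 * bit marks a free index, and ocrdma_srq_toggle_bit() flips it when an
 * index is taken (ocrdma_srq_get_idx()) and again when it is returned.
 */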
1589 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
1591 unsigned int i = idx / 32;
1592 u32 mask = (1U << (idx % 32));
1594 srq->idx_bit_fields[i] ^= mask;
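/*
 * Circular HW queue helpers: free-entry count plus head/tail access and
 * increment, wrapping via the max_wqe_idx mask.
 */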
1597 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1599 return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
1602 static int is_hw_sq_empty(struct ocrdma_qp *qp)
1604 return (qp->sq.tail == qp->sq.head);
1607 static int is_hw_rq_empty(struct ocrdma_qp *qp)
1609 return (qp->rq.tail == qp->rq.head);
1612 static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1614 return q->va + (q->head * q->entry_size);
1617 static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1620 return q->va + (idx * q->entry_size);
1623 static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1625 q->head = (q->head + 1) & q->max_wqe_idx;
1628 static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1630 q->tail = (q->tail + 1) & q->max_wqe_idx;
1633 /* discard the cqe for a given QP */
1634 static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1636 unsigned long cq_flags;
1637 unsigned long flags;
1638 int discard_cnt = 0;
1639 u32 cur_getp, stop_getp;
1640 struct ocrdma_cqe *cqe;
1641 u32 qpn = 0, wqe_idx = 0;
1643 spin_lock_irqsave(&cq->cq_lock, cq_flags);
1645 /* traverse the CQEs in the hw CQ,
1646 * find the matching CQEs for the given qp,
1647 * mark each matching one discarded by clearing its qpn.
1648 * the doorbell is rung in poll_cq() since
1649 * we don't complete out-of-order CQEs.
1652 cur_getp = cq->getp;
1653 /* find up to where we reap the CQ. */
1654 stop_getp = cur_getp;
1656 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1659 cqe = cq->va + cur_getp;
1660 /* if (a) done reaping whole hw cq, or
1661 * (b) qp_xq becomes empty.
1664 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1665 /* if previously discarded cqe found, skip that too. */
1666 /* check for matching qp */
1667 if (qpn == 0 || qpn != qp->id)
1670 if (is_cqe_for_sq(cqe)) {
1671 ocrdma_hwq_inc_tail(&qp->sq);
1674 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
1675 OCRDMA_CQE_BUFTAG_SHIFT) &
1676 qp->srq->rq.max_wqe_idx;
1679 spin_lock_irqsave(&qp->srq->q_lock, flags);
1680 ocrdma_hwq_inc_tail(&qp->srq->rq);
1681 ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
1682 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1685 ocrdma_hwq_inc_tail(&qp->rq);
1688 /* mark cqe discarded so that it is not picked up later
1694 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1695 } while (cur_getp != stop_getp);
1696 spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1699 void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1702 unsigned long flags;
1703 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1704 /* sync with any active CQ poll */
1706 spin_lock_irqsave(&dev->flush_q_lock, flags);
1707 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1709 list_del(&qp->sq_entry);
1711 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1713 list_del(&qp->rq_entry);
1715 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1718 int ocrdma_destroy_qp(struct ib_qp *ibqp)
1720 struct ocrdma_pd *pd;
1721 struct ocrdma_qp *qp;
1722 struct ocrdma_dev *dev;
1723 struct ib_qp_attr attrs;
1725 unsigned long flags;
1727 qp = get_ocrdma_qp(ibqp);
1728 dev = get_ocrdma_dev(ibqp->device);
1732 /* change the QP state to ERROR */
1733 if (qp->state != OCRDMA_QPS_RST) {
1734 attrs.qp_state = IB_QPS_ERR;
1735 attr_mask = IB_QP_STATE;
1736 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1738 /* ensure that CQEs for a newly created QP (whose id may be the same
1739 * as that of the QP just being destroyed) don't get discarded until
1740 * the old CQEs are discarded.
1742 mutex_lock(&dev->dev_lock);
1743 (void) ocrdma_mbx_destroy_qp(dev, qp);
1746 * acquire CQ lock while destroy is in progress, in order to
1747 * protect against processing in-flight CQEs for this QP.
1749 spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
1750 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1751 spin_lock(&qp->rq_cq->cq_lock);
1753 ocrdma_del_qpn_map(dev, qp);
1755 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1756 spin_unlock(&qp->rq_cq->cq_lock);
1757 spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1760 ocrdma_discard_cqes(qp, qp->sq_cq);
1761 ocrdma_discard_cqes(qp, qp->rq_cq);
1763 mutex_unlock(&dev->dev_lock);
1766 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1767 PAGE_ALIGN(qp->sq.len));
1769 ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1770 PAGE_ALIGN(qp->rq.len));
1773 ocrdma_del_flush_qp(qp);
1775 kfree(qp->wqe_wr_id_tbl);
1776 kfree(qp->rqe_wr_id_tbl);
1781 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1782 struct ib_udata *udata)
1785 struct ocrdma_create_srq_uresp uresp;
1787 memset(&uresp, 0, sizeof(uresp));
1788 uresp.rq_dbid = srq->rq.dbid;
1789 uresp.num_rq_pages = 1;
1790 uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
1791 uresp.rq_page_size = srq->rq.len;
1792 uresp.db_page_addr = dev->nic_info.unmapped_db +
1793 (srq->pd->id * dev->nic_info.db_page_size);
1794 uresp.db_page_size = dev->nic_info.db_page_size;
1795 uresp.num_rqe_allocated = srq->rq.max_cnt;
1796 if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1797 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1798 uresp.db_shift = 24;
1800 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1801 uresp.db_shift = 16;
1804 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1807 status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1808 uresp.rq_page_size);
1814 struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1815 struct ib_srq_init_attr *init_attr,
1816 struct ib_udata *udata)
1818 int status = -ENOMEM;
1819 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1820 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1821 struct ocrdma_srq *srq;
1823 if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1824 return ERR_PTR(-EINVAL);
1825 if (init_attr->attr.max_wr > dev->attr.max_rqe)
1826 return ERR_PTR(-EINVAL);
1828 srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1830 return ERR_PTR(status);
1832 spin_lock_init(&srq->q_lock);
1834 srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
1835 status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1839 if (udata == NULL) {
1840 srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
1842 if (srq->rqe_wr_id_tbl == NULL)
1845 srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1846 (srq->rq.max_cnt % 32 ? 1 : 0);
1847 srq->idx_bit_fields =
1848 kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
1849 if (srq->idx_bit_fields == NULL)
1851 memset(srq->idx_bit_fields, 0xff,
1852 srq->bit_fields_len * sizeof(u32));
1855 if (init_attr->attr.srq_limit) {
1856 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1862 status = ocrdma_copy_srq_uresp(dev, srq, udata);
1870 ocrdma_mbx_destroy_srq(dev, srq);
1872 kfree(srq->rqe_wr_id_tbl);
1873 kfree(srq->idx_bit_fields);
1875 return ERR_PTR(status);
1878 int ocrdma_modify_srq(struct ib_srq *ibsrq,
1879 struct ib_srq_attr *srq_attr,
1880 enum ib_srq_attr_mask srq_attr_mask,
1881 struct ib_udata *udata)
1884 struct ocrdma_srq *srq;
1886 srq = get_ocrdma_srq(ibsrq);
1887 if (srq_attr_mask & IB_SRQ_MAX_WR)
1890 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1894 int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1897 struct ocrdma_srq *srq;
1899 srq = get_ocrdma_srq(ibsrq);
1900 status = ocrdma_mbx_query_srq(srq, srq_attr);
1904 int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1907 struct ocrdma_srq *srq;
1908 struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1910 srq = get_ocrdma_srq(ibsrq);
1912 status = ocrdma_mbx_destroy_srq(dev, srq);
1915 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1916 PAGE_ALIGN(srq->rq.len));
1918 kfree(srq->idx_bit_fields);
1919 kfree(srq->rqe_wr_id_tbl);
1924 /* unprivileged verbs and their support functions. */
1925 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1926 struct ocrdma_hdr_wqe *hdr,
1927 struct ib_send_wr *wr)
1929 struct ocrdma_ewqe_ud_hdr *ud_hdr =
1930 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1931 struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
1933 ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
1934 if (qp->qp_type == IB_QPT_GSI)
1935 ud_hdr->qkey = qp->qkey;
1937 ud_hdr->qkey = wr->wr.ud.remote_qkey;
1938 ud_hdr->rsvd_ahid = ah->id;
1939 if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
1940 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
1943 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1944 struct ocrdma_sge *sge, int num_sge,
1945 struct ib_sge *sg_list)
1949 for (i = 0; i < num_sge; i++) {
1950 sge[i].lrkey = sg_list[i].lkey;
1951 sge[i].addr_lo = sg_list[i].addr;
1952 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1953 sge[i].len = sg_list[i].length;
1954 hdr->total_len += sg_list[i].length;
1957 memset(sge, 0, sizeof(*sge));
1960 static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
1962 uint32_t total_len = 0, i;
1964 for (i = 0; i < num_sge; i++)
1965 total_len += sg_list[i].length;
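/*
 * For IB_SEND_INLINE on non-UD QPs the payload is copied directly into
 * the WQE; otherwise ordinary SGEs are built. Either way the resulting
 * WQE size (in OCRDMA_WQE_STRIDE units) is encoded into the control word.
 */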
1970 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1971 struct ocrdma_hdr_wqe *hdr,
1972 struct ocrdma_sge *sge,
1973 struct ib_send_wr *wr, u32 wqe_size)
1978 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
1979 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
1980 if (unlikely(hdr->total_len > qp->max_inline_data)) {
1981 pr_err("%s() supported_len=0x%x,\n"
1982 " unsupported len req=0x%x\n", __func__,
1983 qp->max_inline_data, hdr->total_len);
1986 dpp_addr = (char *)sge;
1987 for (i = 0; i < wr->num_sge; i++) {
1989 (void *)(unsigned long)wr->sg_list[i].addr,
1990 wr->sg_list[i].length);
1991 dpp_addr += wr->sg_list[i].length;
1994 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
1995 if (0 == hdr->total_len)
1996 wqe_size += sizeof(struct ocrdma_sge);
1997 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
1999 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2001 wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
2003 wqe_size += sizeof(struct ocrdma_sge);
2004 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2006 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2010 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2011 struct ib_send_wr *wr)
2014 struct ocrdma_sge *sge;
2015 u32 wqe_size = sizeof(*hdr);
2017 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2018 ocrdma_build_ud_hdr(qp, hdr, wr);
2019 sge = (struct ocrdma_sge *)(hdr + 2);
2020 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
2022 sge = (struct ocrdma_sge *)(hdr + 1);
2025 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2029 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2030 struct ib_send_wr *wr)
2033 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2034 struct ocrdma_sge *sge = ext_rw + 1;
2035 u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
2037 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2040 ext_rw->addr_lo = wr->wr.rdma.remote_addr;
2041 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
2042 ext_rw->lrkey = wr->wr.rdma.rkey;
2043 ext_rw->len = hdr->total_len;
2047 static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2048 struct ib_send_wr *wr)
2050 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2051 struct ocrdma_sge *sge = ext_rw + 1;
2052 u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
2053 sizeof(struct ocrdma_hdr_wqe);
2055 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2056 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2057 hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
2058 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2060 ext_rw->addr_lo = wr->wr.rdma.remote_addr;
2061 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
2062 ext_rw->lrkey = wr->wr.rdma.rkey;
2063 ext_rw->len = hdr->total_len;
2066 static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
2067 struct ocrdma_hw_mr *hwmr)
2072 struct ocrdma_pbe *pbe;
2074 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2077 /* go through the OS phy regions & fill hw pbe entries into pbls. */
2078 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
2079 /* one OS buffer may need more than one pbe when buffers
2080 * are of different sizes.
2081 * split the ib_buf into one or more pbes.
2083 buf_addr = wr->wr.fast_reg.page_list->page_list[i];
2084 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2085 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
2089 /* if the pbl is full of pbes,
2092 if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
2094 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2100 static int get_encoded_page_size(int pg_sz)
2102 /* Max size is 256M (4096 << 16) */
2105 if (pg_sz == (4096 << i))
2111 static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2112 struct ib_send_wr *wr)
2115 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2116 struct ocrdma_mr *mr;
2117 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2118 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2120 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2122 if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
2125 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
2126 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2128 if (wr->wr.fast_reg.page_list_len == 0)
2130 if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
2131 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
2132 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
2133 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
2134 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
2135 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2136 hdr->lkey = wr->wr.fast_reg.rkey;
2137 hdr->total_len = wr->wr.fast_reg.length;
2139 fbo = wr->wr.fast_reg.iova_start -
2140 (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
2142 fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
2143 fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
2144 fast_reg->fbo_hi = upper_32_bits(fbo);
2145 fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2146 fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
2147 fast_reg->size_sge =
2148 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
2149 mr = (struct ocrdma_mr *) (unsigned long)
2150 dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
2151 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
2155 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2157 u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
2159 iowrite32(val, qp->sq_db);
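/*
 * Post a list of send WRs: check QP state and opcode, build the WQE at
 * the SQ head, record the wr_id and signaling choice, convert the WQE
 * to little endian, ring the SQ doorbell and advance the head.
 */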
2162 int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2163 struct ib_send_wr **bad_wr)
2166 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2167 struct ocrdma_hdr_wqe *hdr;
2168 unsigned long flags;
2170 spin_lock_irqsave(&qp->q_lock, flags);
2171 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2172 spin_unlock_irqrestore(&qp->q_lock, flags);
2178 if (qp->qp_type == IB_QPT_UD &&
2179 (wr->opcode != IB_WR_SEND &&
2180 wr->opcode != IB_WR_SEND_WITH_IMM)) {
2185 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2186 wr->num_sge > qp->sq.max_sges) {
2191 hdr = ocrdma_hwq_head(&qp->sq);
2193 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2194 hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2195 if (wr->send_flags & IB_SEND_FENCE)
2197 (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2198 if (wr->send_flags & IB_SEND_SOLICITED)
2200 (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2202 switch (wr->opcode) {
2203 case IB_WR_SEND_WITH_IMM:
2204 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2205 hdr->immdt = ntohl(wr->ex.imm_data);
2207 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2208 ocrdma_build_send(qp, hdr, wr);
2210 case IB_WR_SEND_WITH_INV:
2211 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2212 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2213 hdr->lkey = wr->ex.invalidate_rkey;
2214 status = ocrdma_build_send(qp, hdr, wr);
2216 case IB_WR_RDMA_WRITE_WITH_IMM:
2217 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2218 hdr->immdt = ntohl(wr->ex.imm_data);
2219 case IB_WR_RDMA_WRITE:
2220 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2221 status = ocrdma_build_write(qp, hdr, wr);
2223 case IB_WR_RDMA_READ:
2224 ocrdma_build_read(qp, hdr, wr);
2226 case IB_WR_LOCAL_INV:
2228 (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
2229 hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2230 sizeof(struct ocrdma_sge)) /
2231 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2232 hdr->lkey = wr->ex.invalidate_rkey;
2234 case IB_WR_FAST_REG_MR:
2235 status = ocrdma_build_fr(qp, hdr, wr);
2245 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2246 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2248 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2249 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2250 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2251 OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2252 /* make sure wqe is written before adapter can access it */
2254 /* inform hw to start processing it */
2255 ocrdma_ring_sq_db(qp);
2257 /* update pointer, counter for next wr */
2258 ocrdma_hwq_inc_head(&qp->sq);
2261 spin_unlock_irqrestore(&qp->q_lock, flags);
2265 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2267 u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
2269 iowrite32(val, qp->rq_db);
2272 static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
2276 struct ocrdma_sge *sge;
2278 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2280 wqe_size = sizeof(*sge) + sizeof(*rqe);
2282 rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2283 OCRDMA_WQE_SIZE_SHIFT);
2284 rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2285 rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2287 rqe->rsvd_tag = tag;
2288 sge = (struct ocrdma_sge *)(rqe + 1);
2289 ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2290 ocrdma_cpu_to_le32(rqe, wqe_size);
int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		     struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *rqe;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}
	while (wr) {
		if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
		    wr->num_sge > qp->rq.max_sges) {
			*bad_wr = wr;
			status = -ENOMEM;
			break;
		}
		rqe = ocrdma_hwq_head(&qp->rq);
		ocrdma_build_rqe(rqe, wr, 0);

		qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
		/* make sure rqe is written before adapter can access it */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_rq_db(qp);

		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}
/* cqe for srq's rqe can potentially arrive out of order.
 * index gives the entry in the shadow table where to store
 * the wr_id. tag/index is returned in cqe to reference back
 * to the posted rqe's wr_id.
 */
static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
{
	int row = 0;
	int indx = 0;
	for (row = 0; row < srq->bit_fields_len; row++) {
		if (srq->idx_bit_fields[row]) {
			indx = ffs(srq->idx_bit_fields[row]);
			indx = (row * 32) + (indx - 1);
			if (indx >= srq->rq.max_cnt)
				BUG();
			ocrdma_srq_toggle_bit(srq, indx);
			break;
		}
	}
	if (row == srq->bit_fields_len)
		BUG();
	return indx + 1; /* Use from index 1 */
}
static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
{
	u32 val = srq->rq.dbid | (1 << 16);

	iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
}
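/* Post receive requests to a shared receive queue. Each RQE carries a tag
 * from ocrdma_srq_get_idx() so the wr_id can be found in rqe_wr_id_tbl[]
 * even when SRQ completions arrive out of order.
 */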
int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_srq *srq;
	struct ocrdma_hdr_wqe *rqe;
	u16 tag;

	srq = get_ocrdma_srq(ibsrq);

	spin_lock_irqsave(&srq->q_lock, flags);
	while (wr) {
		if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
		    wr->num_sge > srq->rq.max_sges) {
			status = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		tag = ocrdma_srq_get_idx(srq);
		rqe = ocrdma_hwq_head(&srq->rq);
		ocrdma_build_rqe(rqe, wr, tag);

		srq->rqe_wr_id_tbl[tag] = wr->wr_id;
		/* make sure rqe is written before adapter can perform DMA */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_srq_db(srq);
		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&srq->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->q_lock, flags);
	return status;
}
static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
{
	enum ib_wc_status ibwc_status;

	switch (status) {
	case OCRDMA_CQE_GENERAL_ERR:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	case OCRDMA_CQE_LOC_LEN_ERR:
		ibwc_status = IB_WC_LOC_LEN_ERR;
		break;
	case OCRDMA_CQE_LOC_QP_OP_ERR:
		ibwc_status = IB_WC_LOC_QP_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_EEC_OP_ERR:
		ibwc_status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_PROT_ERR:
		ibwc_status = IB_WC_LOC_PROT_ERR;
		break;
	case OCRDMA_CQE_WR_FLUSH_ERR:
		ibwc_status = IB_WC_WR_FLUSH_ERR;
		break;
	case OCRDMA_CQE_MW_BIND_ERR:
		ibwc_status = IB_WC_MW_BIND_ERR;
		break;
	case OCRDMA_CQE_BAD_RESP_ERR:
		ibwc_status = IB_WC_BAD_RESP_ERR;
		break;
	case OCRDMA_CQE_LOC_ACCESS_ERR:
		ibwc_status = IB_WC_LOC_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_INV_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ACCESS_ERR:
		ibwc_status = IB_WC_REM_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_OP_ERR:
		ibwc_status = IB_WC_REM_OP_ERR;
		break;
	case OCRDMA_CQE_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
		ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ABORT_ERR:
		ibwc_status = IB_WC_REM_ABORT_ERR;
		break;
	case OCRDMA_CQE_INV_EECN_ERR:
		ibwc_status = IB_WC_INV_EECN_ERR;
		break;
	case OCRDMA_CQE_INV_EEC_STATE_ERR:
		ibwc_status = IB_WC_INV_EEC_STATE_ERR;
		break;
	case OCRDMA_CQE_FATAL_ERR:
		ibwc_status = IB_WC_FATAL_ERR;
		break;
	case OCRDMA_CQE_RESP_TIMEOUT_ERR:
		ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
		break;
	default:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	}
	return ibwc_status;
}
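/* Fill a work completion for a send-queue entry: undo the CPU-to-LE swap of
 * the WQE header and map the hardware opcode back to an ib_wc opcode.
 */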
static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
			     u32 wqe_idx)
{
	struct ocrdma_hdr_wqe *hdr;
	struct ocrdma_sge *rw;
	int opcode;

	hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
	ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
	/* Undo the hdr->cw swap */
	opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
	switch (opcode) {
	case OCRDMA_WRITE:
		ibwc->opcode = IB_WC_RDMA_WRITE;
		break;
	case OCRDMA_READ:
		rw = (struct ocrdma_sge *)(hdr + 1);
		ibwc->opcode = IB_WC_RDMA_READ;
		ibwc->byte_len = rw->len;
		break;
	case OCRDMA_SEND:
		ibwc->opcode = IB_WC_SEND;
		break;
	case OCRDMA_FR_MR:
		ibwc->opcode = IB_WC_FAST_REG_MR;
		break;
	case OCRDMA_LKEY_INV:
		ibwc->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		ibwc->status = IB_WC_GENERAL_ERR;
		pr_err("%s() invalid opcode received = 0x%x\n",
		       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
		break;
	}
}
static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
					  struct ocrdma_cqe *cqe)
{
	if (is_cqe_for_sq(cqe)) {
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) &
				~OCRDMA_CQE_STATUS_MASK);
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) |
				(OCRDMA_CQE_WR_FLUSH_ERR <<
				 OCRDMA_CQE_STATUS_SHIFT));
	} else {
		if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_UD_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
					 OCRDMA_CQE_UD_STATUS_SHIFT));
		} else {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
					 OCRDMA_CQE_STATUS_SHIFT));
		}
	}
}
static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	bool expand = false;

	ibwc->byte_len = 0;
	ibwc->qp = &qp->ibqp;
	ibwc->status = ocrdma_to_ibwc_err(status);

	ocrdma_flush_qp(qp);
	ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);

	/* if wqe/rqe pending for which cqe needs to be returned,
	 * trigger inflating it.
	 */
	if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
		expand = true;
		ocrdma_set_cqe_status_flushed(qp, cqe);
	}
	return expand;
}
static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	ibwc->opcode = IB_WC_RECV;
	ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
	ocrdma_hwq_inc_tail(&qp->rq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}
static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	ocrdma_update_wc(qp, ibwc, qp->sq.tail);
	ocrdma_hwq_inc_tail(&qp->sq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}
static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
				 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
				 bool *polled, bool *stop)
{
	bool expand;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
	if (status < OCRDMA_MAX_CQE_ERR)
		atomic_inc(&dev->cqe_err_stats[status]);

	/* when hw sq is empty, but rq is not empty, so we continue
	 * to keep the cqe in order to get the cq event again.
	 */
	if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
		/* when cq for rq and sq is same, it is safe to return
		 * flush cqe for RQEs.
		 */
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
		} else {
			/* stop processing further cqe as this cqe is used for
			 * triggering cq event on buddy cq of RQ.
			 * When QP is destroyed, this cqe will be removed
			 * from the cq's hardware q.
			 */
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else if (is_hw_sq_empty(qp)) {
		/* Do nothing */
		expand = false;
		*polled = false;
		*stop = false;
	} else {
		*polled = true;
		expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
	}
	return expand;
}
static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe,
				     struct ib_wc *ibwc, bool *polled)
{
	bool expand = false;
	int tail = qp->sq.tail;
	u32 wqe_idx;

	if (!qp->wqe_wr_id_tbl[tail].signaled) {
		*polled = false;    /* WC cannot be consumed yet */
	} else {
		ibwc->status = IB_WC_SUCCESS;
		ibwc->wc_flags = 0;
		ibwc->qp = &qp->ibqp;
		ocrdma_update_wc(qp, ibwc, tail);
		*polled = true;
	}
	wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
			OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
	if (tail != wqe_idx)
		expand = true; /* Coalesced CQE can't be consumed yet */

	ocrdma_hwq_inc_tail(&qp->sq);
	return expand;
}
static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
	if (status == OCRDMA_CQE_SUCCESS)
		expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
	else
		expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
	return expand;
}
static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
{
	int status;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
					OCRDMA_CQE_SRCQP_MASK;
	ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
					OCRDMA_CQE_PKEY_MASK;
	ibwc->wc_flags = IB_WC_GRH;
	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
					OCRDMA_CQE_UD_XFER_LEN_SHIFT);
	return status;
}
static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
				       struct ocrdma_cqe *cqe,
				       struct ocrdma_qp *qp)
{
	unsigned long flags;
	struct ocrdma_srq *srq;
	u32 wqe_idx;

	srq = get_ocrdma_srq(qp->ibqp.srq);
	wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
		OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
	if (wqe_idx < 1)
		BUG();

	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
	spin_lock_irqsave(&srq->q_lock, flags);
	ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
	spin_unlock_irqrestore(&srq->q_lock, flags);
	ocrdma_hwq_inc_tail(&srq->rq);
}
static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
				 struct ib_wc *ibwc, bool *polled, bool *stop,
				 int status)
{
	bool expand;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

	if (status < OCRDMA_MAX_CQE_ERR)
		atomic_inc(&dev->cqe_err_stats[status]);

	/* when hw_rq is empty, but wq is not empty, so continue
	 * to keep the cqe to get the cq event again.
	 */
	if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
		} else {
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else if (is_hw_rq_empty(qp)) {
		/* Do nothing */
		expand = false;
		*polled = false;
		*stop = false;
	} else {
		*polled = true;
		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
	}
	return expand;
}
static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
{
	ibwc->opcode = IB_WC_RECV;
	ibwc->qp = &qp->ibqp;
	ibwc->status = IB_WC_SUCCESS;

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
		ocrdma_update_ud_rcqe(ibwc, cqe);
	else
		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);

	if (is_cqe_imm(cqe)) {
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_wr_imm(cqe)) {
		ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_invalidated(cqe)) {
		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
		ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
	}
	if (qp->ibqp.srq) {
		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
	} else {
		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
		ocrdma_hwq_inc_tail(&qp->rq);
	}
}
static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand = false;

	ibwc->wc_flags = 0;
	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			OCRDMA_CQE_UD_STATUS_MASK) >>
				OCRDMA_CQE_UD_STATUS_SHIFT;
	} else {
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
	}
	if (status == OCRDMA_CQE_SUCCESS) {
		*polled = true;
		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
	} else {
		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
					      status);
	}
	return expand;
}
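/* Track CQE ownership: CQs using phase_change flip the expected phase bit
 * whenever the get pointer wraps to zero; otherwise the consumed CQE's valid
 * bit is cleared so it is not polled twice.
 */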
static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
				   u16 cur_getp)
{
	if (cq->phase_change) {
		if (cur_getp == 0)
			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
	} else {
		/* clear valid bit */
		cqe->flags_status_srcqpn = 0;
	}
}
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
			    struct ib_wc *ibwc)
{
	u16 qpn = 0;
	int i = 0;
	bool expand = false;
	int polled_hw_cqes = 0;
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe;
	u16 cur_getp; bool polled = false; bool stop = false;

	cur_getp = cq->getp;
	while (num_entries) {
		cqe = cq->va + cur_getp;
		/* check whether valid cqe or not */
		if (!is_cqe_valid(cq, cqe))
			break;
		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
		/* ignore discarded cqe */
		if (qpn == 0)
			goto skip_cqe;
		qp = dev->qp_tbl[qpn];
		BUG_ON(qp == NULL);

		if (is_cqe_for_sq(cqe)) {
			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
						  &stop);
		} else {
			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
						  &stop);
		}
		if (expand)
			goto expand_cqe;
		if (stop)
			goto stop_cqe;
		/* clear qpn to avoid duplicate processing by discard_cqe() */
		cqe->cmn.qpn = 0;
skip_cqe:
		polled_hw_cqes += 1;
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
		ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
		if (polled) {
			num_entries -= 1;
			i += 1;
			ibwc = ibwc + 1;
			polled = false;
		}
	}
stop_cqe:
	cq->getp = cur_getp;
	if (cq->deferred_arm) {
		ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol,
				  polled_hw_cqes);
		cq->deferred_arm = false;
		cq->deferred_sol = false;
	} else {
		/* We need to pop the CQE. No need to arm */
		ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol,
				  polled_hw_cqes);
		cq->deferred_sol = false;
	}
	return i;
}
/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
	int err_cqes = 0;

	while (num_entries) {
		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
			break;
		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
			ocrdma_hwq_inc_tail(&qp->sq);
		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
			ocrdma_hwq_inc_tail(&qp->rq);
		} else {
			return err_cqes;
		}
		ibwc->byte_len = 0;
		ibwc->status = IB_WC_WR_FLUSH_ERR;
		ibwc = ibwc + 1;
		err_cqes += 1;
		num_entries -= 1;
	}
	return err_cqes;
}
int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int cqes_to_poll = num_entries;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int num_os_cqe = 0, err_cqes = 0;
	struct ocrdma_qp *qp;
	unsigned long flags;

	/* poll cqes from adapter CQ */
	spin_lock_irqsave(&cq->cq_lock, flags);
	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	cqes_to_poll -= num_os_cqe;

	if (cqes_to_poll) {
		wc = wc + num_os_cqe;
		/* adapter returns single error cqe when qp moves to
		 * error state. So insert error cqes with wc_status as
		 * FLUSHED for pending WQEs and RQEs of QP's SQ and RQ
		 * respectively which uses this CQ.
		 */
		spin_lock_irqsave(&dev->flush_q_lock, flags);
		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
			if (cqes_to_poll == 0)
				break;
			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
			cqes_to_poll -= err_cqes;
			num_os_cqe += err_cqes;
			wc = wc + err_cqes;
		}
		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
	}
	return num_os_cqe;
}
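/* Request a completion notification. The first arm after CQ creation rings
 * the doorbell immediately; afterwards only deferred_arm/deferred_sol are
 * set and ocrdma_poll_hwcq() applies them when it rings the doorbell.
 */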
int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	u16 cq_id;
	unsigned long flags;
	bool arm_needed = false, sol_needed = false;

	cq_id = cq->id;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
		arm_needed = true;
	if (cq_flags & IB_CQ_SOLICITED)
		sol_needed = true;

	if (cq->first_arm) {
		ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
		cq->first_arm = false;
	}

	cq->deferred_arm = true;
	cq->deferred_sol = sol_needed;
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return 0;
}
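/* Allocate a fast-register MR: size its PBL for max_page_list_len pages,
 * register it with the device with no access flags at creation time, and
 * record it in dev->stag_arr[] indexed by bits of its lkey.
 */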
struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (max_page_list_len > dev->attr.max_pages_per_frmr)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
	if (status)
		goto pbl_err;
	mr->hwmr.fr_mr = 1;
	mr->hwmr.remote_rd = 0;
	mr->hwmr.remote_wr = 0;
	mr->hwmr.local_rd = 0;
	mr->hwmr.local_wr = 0;
	mr->hwmr.mw_bind = 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto pbl_err;
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
	if (status)
		goto mbx_err;
	mr->ibmr.rkey = mr->hwmr.lkey;
	mr->ibmr.lkey = mr->hwmr.lkey;
	dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
		(unsigned long) mr;
	return &mr->ibmr;
mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
	kfree(mr);
	return ERR_PTR(-ENOMEM);
}
struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
							  *ibdev,
							  int page_list_len)
{
	struct ib_fast_reg_page_list *frmr_list;
	int size;

	size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
	frmr_list = kzalloc(size, GFP_KERNEL);
	if (!frmr_list)
		return ERR_PTR(-ENOMEM);
	frmr_list->page_list = (u64 *)(frmr_list + 1);
	return frmr_list;
}

void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
{
	kfree(page_list);
}

#define MAX_KERNEL_PBE_SIZE 65536
static inline int count_kernel_pbes(struct ib_phys_buf *buf_list,
				    int buf_cnt, u32 *pbe_size)
{
	u64 total_size = 0;
	u64 buf_size = 0;
	int i;

	*pbe_size = roundup(buf_list[0].size, PAGE_SIZE);
	*pbe_size = roundup_pow_of_two(*pbe_size);

	/* find the smallest PBE size that we can have */
	for (i = 0; i < buf_cnt; i++) {
		/* first addr may not be page aligned, so ignore checking */
		if ((i != 0) && ((buf_list[i].addr & ~PAGE_MASK) ||
				 (buf_list[i].size & ~PAGE_MASK))) {
			return 0;
		}

		/* if configured PBE size is greater than the chosen one,
		 * reduce the PBE size.
		 */
		buf_size = roundup(buf_list[i].size, PAGE_SIZE);
		/* pbe_size has to be even multiple of 4K 1,2,4,8...*/
		buf_size = roundup_pow_of_two(buf_size);
		if (*pbe_size > buf_size)
			*pbe_size = buf_size;

		total_size += buf_size;
	}
	*pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ?
	    (MAX_KERNEL_PBE_SIZE) : (*pbe_size);

	/* num_pbes = total_size / (*pbe_size); this is implemented below. */
	return total_size >> ilog2(*pbe_size);
}
static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt,
			      u32 pbe_size, struct ocrdma_pbl *pbl_tbl,
			      struct ocrdma_hw_mr *hwmr)
{
	int i;
	int idx;
	int pbes_per_buf = 0;
	u64 buf_addr = 0;
	int num_pbes;
	struct ocrdma_pbe *pbe;
	int total_num_pbes = 0;

	if (!hwmr->num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	num_pbes = 0;
	/* go through the OS phy regions & fill hw pbe entries into pbls. */
	for (i = 0; i < ib_buf_cnt; i++) {
		buf_addr = buf_list[i].addr;
		pbes_per_buf =
		    roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) /
		    pbe_size;
		hwmr->len += buf_list[i].size;
		/* number of pbes can be more for one OS buf, when
		 * buffers are of different sizes.
		 * split the ib_buf to one or more pbes.
		 */
		for (idx = 0; idx < pbes_per_buf; idx++) {
			/* we program always page aligned addresses,
			 * first unaligned address is taken care by fbo.
			 */
			if (i == 0) {
				/* for non zero fbo, assign the
				 * start of the page.
				 */
				pbe->pa_lo =
				    cpu_to_le32((u32) (buf_addr & PAGE_MASK));
				pbe->pa_hi =
				    cpu_to_le32((u32) upper_32_bits(buf_addr));
			} else {
				pbe->pa_lo =
				    cpu_to_le32((u32) (buf_addr & 0xffffffff));
				pbe->pa_hi =
				    cpu_to_le32((u32) upper_32_bits(buf_addr));
			}
			buf_addr += pbe_size;
			num_pbes += 1;
			total_num_pbes += 1;
			pbe++;
			if (total_num_pbes == hwmr->num_pbes)
				goto mr_tbl_done;
			/* if the pbl is full storing the pbes,
			 * move to next pbl.
			 */
			if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
				pbl_tbl++;
				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
				num_pbes = 0;
			}
		}
	}
mr_tbl_done:
	return;
}
struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd,
				   struct ib_phys_buf *buf_list,
				   int buf_cnt, int acc, u64 *iova_start)
{
	int status = -ENOMEM;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	u32 num_pbes;
	u32 pbe_size = 0;

	if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);

	num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size);
	if (num_pbes == 0) {
		status = -EINVAL;
		goto pbl_err;
	}
	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
	if (status)
		goto pbl_err;

	mr->hwmr.pbe_size = pbe_size;
	mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK);
	mr->hwmr.va = *iova_start;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;

	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto pbl_err;
	build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table,
			  &mr->hwmr);
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
	if (status)
		goto mbx_err;

	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return &mr->ibmr;

mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
	kfree(mr);
	return ERR_PTR(status);
}